diff --git a/NOTICE.txt b/NOTICE.txt index 03e5abc26c..615fb544ea 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,4 +1,4 @@ -Copyright 2018-2024 Elasticsearch BV +Copyright 2018-2025 Elasticsearch BV This product includes software developed by The Apache Software Foundation (http://www.apache.org/). diff --git a/VERSION b/VERSION index c68ccd5185..edcfe40d19 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.14.0-SNAPSHOT +2.14.0 diff --git a/deploy/eck-operator/Chart.yaml b/deploy/eck-operator/Chart.yaml index 16b0329f54..b58c45d68c 100644 --- a/deploy/eck-operator/Chart.yaml +++ b/deploy/eck-operator/Chart.yaml @@ -10,9 +10,9 @@ home: https://github.com/elastic/cloud-on-k8s type: application -version: 2.14.0-SNAPSHOT +version: 2.14.0 -appVersion: 2.14.0-SNAPSHOT +appVersion: 2.14.0 kubeVersion: ">=1.21.0-0" @@ -32,5 +32,5 @@ maintainers: dependencies: - name: eck-operator-crds - version: 2.14.0-SNAPSHOT + version: 2.14.0 condition: installCRDs diff --git a/deploy/eck-operator/charts/eck-operator-crds/Chart.yaml b/deploy/eck-operator/charts/eck-operator-crds/Chart.yaml index b17269bcf9..c1d49780d0 100644 --- a/deploy/eck-operator/charts/eck-operator-crds/Chart.yaml +++ b/deploy/eck-operator/charts/eck-operator-crds/Chart.yaml @@ -8,9 +8,9 @@ description: ECK operator Custom Resource Definitions type: application -version: 2.14.0-SNAPSHOT +version: 2.14.0 -appVersion: 2.14.0-SNAPSHOT +appVersion: 2.14.0 home: https://github.com/elastic/cloud-on-k8s diff --git a/deploy/eck-operator/templates/tests/statefulset_test.yaml b/deploy/eck-operator/templates/tests/statefulset_test.yaml index e9f0975f61..9f9c3d825f 100644 --- a/deploy/eck-operator/templates/tests/statefulset_test.yaml +++ b/deploy/eck-operator/templates/tests/statefulset_test.yaml @@ -39,8 +39,8 @@ tests: app.kubernetes.io/instance: RELEASE-NAME app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: elastic-operator - app.kubernetes.io/version: 2.14.0-SNAPSHOT - helm.sh/chart: eck-operator-2.14.0-SNAPSHOT + app.kubernetes.io/version: 2.14.0 + helm.sh/chart: eck-operator-2.14.0 key2: value2 - it: should use the specified webhook secret name set: diff --git a/deploy/eck-stack/Chart.yaml b/deploy/eck-stack/Chart.yaml index 2b896a9212..2e4ee3b9e4 100644 --- a/deploy/eck-stack/Chart.yaml +++ b/deploy/eck-stack/Chart.yaml @@ -3,30 +3,30 @@ name: eck-stack description: Elastic Stack managed by the ECK Operator kubeVersion: ">= 1.21.0-0" type: application -version: 0.12.0-SNAPSHOT +version: 0.12.1 dependencies: - name: eck-elasticsearch condition: eck-elasticsearch.enabled - version: "0.12.0-SNAPSHOT" + version: "0.12.1" - name: eck-kibana condition: eck-kibana.enabled - version: "0.12.0-SNAPSHOT" + version: "0.12.1" - name: eck-agent condition: eck-agent.enabled - version: "0.12.0-SNAPSHOT" + version: "0.12.1" - name: eck-fleet-server condition: eck-fleet-server.enabled - version: "0.12.0-SNAPSHOT" + version: "0.12.1" - name: eck-beats condition: eck-beats.enabled - version: "0.12.0-SNAPSHOT" + version: "0.12.1" - name: eck-logstash condition: eck-logstash.enabled - version: "0.12.0-SNAPSHOT" + version: "0.12.1" - name: eck-apm-server condition: eck-apm-server.enabled - version: "0.12.0-SNAPSHOT" + version: "0.12.1" - name: eck-enterprise-search condition: eck-enterprise-search.enabled - version: "0.12.0-SNAPSHOT" + version: "0.12.1" diff --git a/deploy/eck-stack/charts/eck-agent/Chart.yaml b/deploy/eck-stack/charts/eck-agent/Chart.yaml index 0a6bcfe7f6..d28effb4d7 100644 --- a/deploy/eck-stack/charts/eck-agent/Chart.yaml +++ 
b/deploy/eck-stack/charts/eck-agent/Chart.yaml @@ -3,7 +3,7 @@ name: eck-agent description: Elastic Agent managed by the ECK operator kubeVersion: ">= 1.21.0-0" type: application -version: 0.12.0-SNAPSHOT +version: 0.12.1 sources: - https://github.com/elastic/cloud-on-k8s - https://github.com/elastic/elastic-agent diff --git a/deploy/eck-stack/charts/eck-agent/examples/fleet-agents.yaml b/deploy/eck-stack/charts/eck-agent/examples/fleet-agents.yaml index 8ddbc10ba1..07c6d3447c 100644 --- a/deploy/eck-stack/charts/eck-agent/examples/fleet-agents.yaml +++ b/deploy/eck-stack/charts/eck-agent/examples/fleet-agents.yaml @@ -1,7 +1,7 @@ # The following example should only be used in conjunction with the 'eck-fleet-server' Helm Chart, # and shows how the Agents can be deployed as a daemonset, and controlled by Fleet Server. # -version: 8.15.0-SNAPSHOT +version: 8.15.0 spec: # This must match the name of an Agent policy. diff --git a/deploy/eck-stack/charts/eck-agent/examples/system-integration.yaml b/deploy/eck-stack/charts/eck-agent/examples/system-integration.yaml index a5be1db255..6e510618ae 100644 --- a/deploy/eck-stack/charts/eck-agent/examples/system-integration.yaml +++ b/deploy/eck-stack/charts/eck-agent/examples/system-integration.yaml @@ -1,7 +1,7 @@ # The following example should only be used in Agent "standalone" mode, # and should not be used when Agent is used with Fleet Server. # -version: 8.15.0-SNAPSHOT +version: 8.15.0 spec: elasticsearchRefs: - name: eck-elasticsearch @@ -33,7 +33,7 @@ spec: meta: package: name: system - version: 8.15.0-SNAPSHOT + version: 8.15.0 data_stream: namespace: default streams: diff --git a/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent-cluster-role-binding_test.yaml b/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent-cluster-role-binding_test.yaml index 94bf62db3e..ca1c9910ca 100644 --- a/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent-cluster-role-binding_test.yaml +++ b/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent-cluster-role-binding_test.yaml @@ -79,7 +79,7 @@ tests: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-agent clusterRoleBinding: label - helm.sh/chart: eck-agent-0.12.0-SNAPSHOT + helm.sh/chart: eck-agent-0.12.1 test: label - equal: path: metadata.annotations diff --git a/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent-cluster-role_test.yaml b/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent-cluster-role_test.yaml index 364d1dc423..5ce2601a25 100644 --- a/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent-cluster-role_test.yaml +++ b/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent-cluster-role_test.yaml @@ -136,7 +136,7 @@ tests: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-agent clusterRole: label - helm.sh/chart: eck-agent-0.12.0-SNAPSHOT + helm.sh/chart: eck-agent-0.12.1 test: label - equal: path: metadata.annotations diff --git a/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent-service-account_test.yaml b/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent-service-account_test.yaml index b84e6fae4d..be52442568 100644 --- a/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent-service-account_test.yaml +++ b/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent-service-account_test.yaml @@ -49,7 +49,7 @@ tests: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-agent serviceAccount: label - helm.sh/chart: 
eck-agent-0.12.0-SNAPSHOT + helm.sh/chart: eck-agent-0.12.1 test: label - equal: path: metadata.annotations diff --git a/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent_test.yaml b/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent_test.yaml index 4808423eed..b63db3fc46 100644 --- a/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent_test.yaml +++ b/deploy/eck-stack/charts/eck-agent/templates/tests/elastic-agent_test.yaml @@ -22,7 +22,7 @@ tests: value: quickstart-eck-agent - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - equal: path: spec.config value: null @@ -51,7 +51,7 @@ tests: app.kubernetes.io/instance: quickstart app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-agent - helm.sh/chart: eck-agent-0.12.0-SNAPSHOT + helm.sh/chart: eck-agent-0.12.1 test: label - equal: path: metadata.annotations diff --git a/deploy/eck-stack/charts/eck-agent/values.yaml b/deploy/eck-stack/charts/eck-agent/values.yaml index 678626fad0..67ad5eede8 100644 --- a/deploy/eck-stack/charts/eck-agent/values.yaml +++ b/deploy/eck-stack/charts/eck-agent/values.yaml @@ -18,7 +18,7 @@ # Version of Elastic Agent. # -version: 8.15.0-SNAPSHOT +version: 8.15.0 # Labels that will be applied to Elastic Agent. # diff --git a/deploy/eck-stack/charts/eck-apm-server/Chart.yaml b/deploy/eck-stack/charts/eck-apm-server/Chart.yaml index 1d067523b4..1dbc2b5275 100644 --- a/deploy/eck-stack/charts/eck-apm-server/Chart.yaml +++ b/deploy/eck-stack/charts/eck-apm-server/Chart.yaml @@ -3,7 +3,7 @@ name: eck-apm-server description: Elastic APM Server managed by the ECK operator kubeVersion: ">= 1.21.0-0" type: application -version: 0.12.0-SNAPSHOT +version: 0.12.1 sources: - https://github.com/elastic/cloud-on-k8s - https://github.com/elastic/apm-server diff --git a/deploy/eck-stack/charts/eck-apm-server/examples/jaeger-with-http-configuration.yaml b/deploy/eck-stack/charts/eck-apm-server/examples/jaeger-with-http-configuration.yaml index 3bc61719ae..d20432531e 100644 --- a/deploy/eck-stack/charts/eck-apm-server/examples/jaeger-with-http-configuration.yaml +++ b/deploy/eck-stack/charts/eck-apm-server/examples/jaeger-with-http-configuration.yaml @@ -1,7 +1,7 @@ --- # Version of APM Server. # -version: 8.15.0-SNAPSHOT +version: 8.15.0 # Count of APM Server replicas to create. 
# diff --git a/deploy/eck-stack/charts/eck-apm-server/templates/tests/apmserver_test.yaml b/deploy/eck-stack/charts/eck-apm-server/templates/tests/apmserver_test.yaml index df42218739..d04f3c0141 100644 --- a/deploy/eck-stack/charts/eck-apm-server/templates/tests/apmserver_test.yaml +++ b/deploy/eck-stack/charts/eck-apm-server/templates/tests/apmserver_test.yaml @@ -16,7 +16,7 @@ tests: value: quickstart-eck-apm-server - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - it: name override should work properly set: nameOverride: override @@ -62,7 +62,7 @@ tests: app.kubernetes.io/instance: quickstart app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-apm-server - helm.sh/chart: eck-apm-server-0.12.0-SNAPSHOT + helm.sh/chart: eck-apm-server-0.12.1 test: label - equal: path: metadata.annotations @@ -84,7 +84,7 @@ tests: value: quickstart-eck-apm-server - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - equal: path: spec.count value: 1 diff --git a/deploy/eck-stack/charts/eck-apm-server/values.yaml b/deploy/eck-stack/charts/eck-apm-server/values.yaml index 899cb575e2..414c35be84 100644 --- a/deploy/eck-stack/charts/eck-apm-server/values.yaml +++ b/deploy/eck-stack/charts/eck-apm-server/values.yaml @@ -18,7 +18,7 @@ # Version of APM Server. # -version: 8.15.0-SNAPSHOT +version: 8.15.0 # APM Server Docker image to deploy # diff --git a/deploy/eck-stack/charts/eck-beats/Chart.yaml b/deploy/eck-stack/charts/eck-beats/Chart.yaml index ee934b8244..ff59e1c441 100644 --- a/deploy/eck-stack/charts/eck-beats/Chart.yaml +++ b/deploy/eck-stack/charts/eck-beats/Chart.yaml @@ -4,7 +4,7 @@ description: Elastic Beats managed by the ECK operator # Requirement comes from minimum version supported for eck-operator (https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s_supported_versions.html) kubeVersion: ">= 1.20.0-0" type: application -version: 0.12.0-SNAPSHOT +version: 0.12.1 sources: - https://github.com/elastic/cloud-on-k8s - https://github.com/elastic/beats diff --git a/deploy/eck-stack/charts/eck-beats/examples/auditbeat_hosts.yaml b/deploy/eck-stack/charts/eck-beats/examples/auditbeat_hosts.yaml index 44d9de1430..f582917b47 100644 --- a/deploy/eck-stack/charts/eck-beats/examples/auditbeat_hosts.yaml +++ b/deploy/eck-stack/charts/eck-beats/examples/auditbeat_hosts.yaml @@ -1,5 +1,5 @@ name: auditbeat -version: 8.15.0-SNAPSHOT +version: 8.15.0 spec: type: auditbeat elasticsearchRef: diff --git a/deploy/eck-stack/charts/eck-beats/examples/filebeat_no_autodiscover.yaml b/deploy/eck-stack/charts/eck-beats/examples/filebeat_no_autodiscover.yaml index 643da82d9e..4aa53b36ec 100644 --- a/deploy/eck-stack/charts/eck-beats/examples/filebeat_no_autodiscover.yaml +++ b/deploy/eck-stack/charts/eck-beats/examples/filebeat_no_autodiscover.yaml @@ -1,5 +1,5 @@ name: filebeat -version: 8.15.0-SNAPSHOT +version: 8.15.0 spec: type: filebeat elasticsearchRef: diff --git a/deploy/eck-stack/charts/eck-beats/examples/heartbeat_es_kb_health.yaml b/deploy/eck-stack/charts/eck-beats/examples/heartbeat_es_kb_health.yaml index de0af430d0..f2b41c34c2 100644 --- a/deploy/eck-stack/charts/eck-beats/examples/heartbeat_es_kb_health.yaml +++ b/deploy/eck-stack/charts/eck-beats/examples/heartbeat_es_kb_health.yaml @@ -1,5 +1,5 @@ name: heartbeat -version: 8.15.0-SNAPSHOT +version: 8.15.0 spec: type: heartbeat elasticsearchRef: diff --git a/deploy/eck-stack/charts/eck-beats/examples/metricbeat_hosts.yaml b/deploy/eck-stack/charts/eck-beats/examples/metricbeat_hosts.yaml index 
6d44a10ac0..c5a487f802 100644 --- a/deploy/eck-stack/charts/eck-beats/examples/metricbeat_hosts.yaml +++ b/deploy/eck-stack/charts/eck-beats/examples/metricbeat_hosts.yaml @@ -1,7 +1,7 @@ name: metricbeat spec: type: metricbeat - version: 8.15.0-SNAPSHOT + version: 8.15.0 elasticsearchRef: name: eck-elasticsearch kibanaRef: diff --git a/deploy/eck-stack/charts/eck-beats/examples/packetbeat_dns_http.yaml b/deploy/eck-stack/charts/eck-beats/examples/packetbeat_dns_http.yaml index 7513c4d7e7..a15b6fa323 100644 --- a/deploy/eck-stack/charts/eck-beats/examples/packetbeat_dns_http.yaml +++ b/deploy/eck-stack/charts/eck-beats/examples/packetbeat_dns_http.yaml @@ -1,7 +1,7 @@ name: packetbeat spec: type: packetbeat - version: 8.15.0-SNAPSHOT + version: 8.15.0 elasticsearchRef: name: eck-elasticsearch kibanaRef: diff --git a/deploy/eck-stack/charts/eck-beats/templates/tests/beats-metricbeat-example_test.yaml b/deploy/eck-stack/charts/eck-beats/templates/tests/beats-metricbeat-example_test.yaml index 28388218c3..00796c8ab0 100644 --- a/deploy/eck-stack/charts/eck-beats/templates/tests/beats-metricbeat-example_test.yaml +++ b/deploy/eck-stack/charts/eck-beats/templates/tests/beats-metricbeat-example_test.yaml @@ -233,7 +233,7 @@ tests: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-beats serviceAccount: label - helm.sh/chart: eck-beats-0.12.0-SNAPSHOT + helm.sh/chart: eck-beats-0.12.1 test: label - equal: path: metadata.annotations diff --git a/deploy/eck-stack/charts/eck-beats/templates/tests/beats_test.yaml b/deploy/eck-stack/charts/eck-beats/templates/tests/beats_test.yaml index 7f60f7275e..4c83054601 100644 --- a/deploy/eck-stack/charts/eck-beats/templates/tests/beats_test.yaml +++ b/deploy/eck-stack/charts/eck-beats/templates/tests/beats_test.yaml @@ -17,7 +17,7 @@ tests: value: quickstart-eck-beats - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - equal: path: spec.type value: filebeat diff --git a/deploy/eck-stack/charts/eck-beats/values.yaml b/deploy/eck-stack/charts/eck-beats/values.yaml index def2385210..54fd3be512 100644 --- a/deploy/eck-stack/charts/eck-beats/values.yaml +++ b/deploy/eck-stack/charts/eck-beats/values.yaml @@ -18,7 +18,7 @@ # Version of Elastic Beats. # -version: 8.15.0-SNAPSHOT +version: 8.15.0 # Labels that will be applied to Elastic Beats. 
# diff --git a/deploy/eck-stack/charts/eck-elasticsearch/Chart.yaml b/deploy/eck-stack/charts/eck-elasticsearch/Chart.yaml index 1162f5f26b..e0cf87e3ae 100644 --- a/deploy/eck-stack/charts/eck-elasticsearch/Chart.yaml +++ b/deploy/eck-stack/charts/eck-elasticsearch/Chart.yaml @@ -3,7 +3,7 @@ name: eck-elasticsearch description: Elasticsearch managed by the ECK operator kubeVersion: ">= 1.21.0-0" type: application -version: 0.12.0-SNAPSHOT +version: 0.12.1 sources: - https://github.com/elastic/cloud-on-k8s - https://github.com/elastic/elasticsearch/ diff --git a/deploy/eck-stack/charts/eck-elasticsearch/templates/tests/elasticsearch_test.yaml b/deploy/eck-stack/charts/eck-elasticsearch/templates/tests/elasticsearch_test.yaml index 5b46ccb967..44a4dcbcc9 100644 --- a/deploy/eck-stack/charts/eck-elasticsearch/templates/tests/elasticsearch_test.yaml +++ b/deploy/eck-stack/charts/eck-elasticsearch/templates/tests/elasticsearch_test.yaml @@ -108,7 +108,7 @@ tests: app.kubernetes.io/instance: quickstart app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-elasticsearch - helm.sh/chart: eck-elasticsearch-0.12.0-SNAPSHOT + helm.sh/chart: eck-elasticsearch-0.12.1 test: label - equal: path: metadata.annotations @@ -207,11 +207,11 @@ tests: value: my.regis.try/es:8 - it: should render image properly set: - image: my.registry.com/elastic/elasticsearch:8.15.0-SNAPSHOT + image: my.registry.com/elastic/elasticsearch:8.15.0 asserts: - equal: path: spec.image - value: my.registry.com/elastic/elasticsearch:8.15.0-SNAPSHOT + value: my.registry.com/elastic/elasticsearch:8.15.0 - it: should render no podDisruptionBudget by default set: asserts: diff --git a/deploy/eck-stack/charts/eck-elasticsearch/templates/tests/ingress_test.yaml b/deploy/eck-stack/charts/eck-elasticsearch/templates/tests/ingress_test.yaml index 612d69a460..73b91f34b8 100644 --- a/deploy/eck-stack/charts/eck-elasticsearch/templates/tests/ingress_test.yaml +++ b/deploy/eck-stack/charts/eck-elasticsearch/templates/tests/ingress_test.yaml @@ -56,7 +56,7 @@ tests: app.kubernetes.io/instance: quickstart app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-elasticsearch - helm.sh/chart: eck-elasticsearch-0.12.0-SNAPSHOT + helm.sh/chart: eck-elasticsearch-0.12.1 test: label - equal: path: metadata.annotations diff --git a/deploy/eck-stack/charts/eck-elasticsearch/values.yaml b/deploy/eck-stack/charts/eck-elasticsearch/values.yaml index 15ef5f26e6..3139412b84 100644 --- a/deploy/eck-stack/charts/eck-elasticsearch/values.yaml +++ b/deploy/eck-stack/charts/eck-elasticsearch/values.yaml @@ -18,7 +18,7 @@ # Version of Elasticsearch. 
# -version: 8.15.0-SNAPSHOT +version: 8.15.0 # Elasticsearch Docker image to deploy # diff --git a/deploy/eck-stack/charts/eck-enterprise-search/Chart.yaml b/deploy/eck-stack/charts/eck-enterprise-search/Chart.yaml index a072dc600c..ab57ae3ef3 100644 --- a/deploy/eck-stack/charts/eck-enterprise-search/Chart.yaml +++ b/deploy/eck-stack/charts/eck-enterprise-search/Chart.yaml @@ -3,7 +3,7 @@ name: eck-enterprise-search description: Elastic Enterprise Search managed by the ECK operator kubeVersion: ">= 1.21.0-0" type: application -version: 0.12.0-SNAPSHOT +version: 0.12.1 sources: - https://github.com/elastic/cloud-on-k8s icon: https://github.com/elastic/ent-search/blob/main/public/app-search-favicon-196x196.png diff --git a/deploy/eck-stack/charts/eck-enterprise-search/templates/tests/entsearch_test.yaml b/deploy/eck-stack/charts/eck-enterprise-search/templates/tests/entsearch_test.yaml index cf1c74dd62..8614e29f99 100644 --- a/deploy/eck-stack/charts/eck-enterprise-search/templates/tests/entsearch_test.yaml +++ b/deploy/eck-stack/charts/eck-enterprise-search/templates/tests/entsearch_test.yaml @@ -16,7 +16,7 @@ tests: value: quickstart-eck-enterprise-search - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - it: name override should work properly set: nameOverride: override @@ -62,7 +62,7 @@ tests: app.kubernetes.io/instance: quickstart app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-enterprise-search - helm.sh/chart: eck-enterprise-search-0.12.0-SNAPSHOT + helm.sh/chart: eck-enterprise-search-0.12.1 test: label - equal: path: metadata.annotations @@ -84,7 +84,7 @@ tests: value: quickstart-eck-enterprise-search - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - equal: path: spec.count value: 1 diff --git a/deploy/eck-stack/charts/eck-enterprise-search/values.yaml b/deploy/eck-stack/charts/eck-enterprise-search/values.yaml index 5b30b428c9..bb1a016927 100644 --- a/deploy/eck-stack/charts/eck-enterprise-search/values.yaml +++ b/deploy/eck-stack/charts/eck-enterprise-search/values.yaml @@ -18,7 +18,7 @@ # Version of Enterprise Search. 
# -version: 8.15.0-SNAPSHOT +version: 8.15.0 # Enterprise Search Docker image to deploy # diff --git a/deploy/eck-stack/charts/eck-fleet-server/Chart.yaml b/deploy/eck-stack/charts/eck-fleet-server/Chart.yaml index f6ef9d3639..3d5cbefdcc 100644 --- a/deploy/eck-stack/charts/eck-fleet-server/Chart.yaml +++ b/deploy/eck-stack/charts/eck-fleet-server/Chart.yaml @@ -3,7 +3,7 @@ name: eck-fleet-server description: Elastic Fleet Server as an Agent managed by the ECK operator kubeVersion: ">= 1.21.0-0" type: application -version: 0.12.0-SNAPSHOT +version: 0.12.1 sources: - https://github.com/elastic/cloud-on-k8s - https://github.com/elastic/elastic-agent diff --git a/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server-cluster-role-binding_test.yaml b/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server-cluster-role-binding_test.yaml index 9d4b8e11a2..c83d7528cd 100644 --- a/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server-cluster-role-binding_test.yaml +++ b/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server-cluster-role-binding_test.yaml @@ -49,7 +49,7 @@ tests: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-fleet-server clusterRoleBinding: label - helm.sh/chart: eck-fleet-server-0.12.0-SNAPSHOT + helm.sh/chart: eck-fleet-server-0.12.1 test: label - equal: path: metadata.annotations diff --git a/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server-cluster-role_test.yaml b/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server-cluster-role_test.yaml index a0cd546b77..415dd34761 100644 --- a/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server-cluster-role_test.yaml +++ b/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server-cluster-role_test.yaml @@ -88,7 +88,7 @@ tests: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-fleet-server clusterRole: label - helm.sh/chart: eck-fleet-server-0.12.0-SNAPSHOT + helm.sh/chart: eck-fleet-server-0.12.1 test: label - equal: path: metadata.annotations diff --git a/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server-service-account_test.yaml b/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server-service-account_test.yaml index 5292766444..55a31000d1 100644 --- a/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server-service-account_test.yaml +++ b/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server-service-account_test.yaml @@ -34,7 +34,7 @@ tests: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-fleet-server serviceAccount: label - helm.sh/chart: eck-fleet-server-0.12.0-SNAPSHOT + helm.sh/chart: eck-fleet-server-0.12.1 test: label - equal: path: metadata.annotations diff --git a/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server_test.yaml b/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server_test.yaml index 74bf89d604..e9b7c5c604 100644 --- a/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server_test.yaml +++ b/deploy/eck-stack/charts/eck-fleet-server/templates/tests/fleet-server_test.yaml @@ -13,7 +13,7 @@ tests: value: quickstart-eck-fleet-server - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - equal: path: spec.kibanaRef.name value: eck-kibana @@ -46,7 +46,7 @@ tests: app.kubernetes.io/instance: quickstart app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-fleet-server - helm.sh/chart: eck-fleet-server-0.12.0-SNAPSHOT + 
helm.sh/chart: eck-fleet-server-0.12.1 test: label - equal: path: metadata.annotations diff --git a/deploy/eck-stack/charts/eck-fleet-server/values.yaml b/deploy/eck-stack/charts/eck-fleet-server/values.yaml index cbc4daa9fc..97740faf56 100644 --- a/deploy/eck-stack/charts/eck-fleet-server/values.yaml +++ b/deploy/eck-stack/charts/eck-fleet-server/values.yaml @@ -18,7 +18,7 @@ # Version of Elastic Fleet Server. # -version: 8.15.0-SNAPSHOT +version: 8.15.0 # Labels that will be applied to Elastic Fleet Server. # diff --git a/deploy/eck-stack/charts/eck-kibana/Chart.yaml b/deploy/eck-stack/charts/eck-kibana/Chart.yaml index 1be9bda6d7..c5d2938430 100644 --- a/deploy/eck-stack/charts/eck-kibana/Chart.yaml +++ b/deploy/eck-stack/charts/eck-kibana/Chart.yaml @@ -3,7 +3,7 @@ name: eck-kibana description: Kibana managed by the ECK operator kubeVersion: ">= 1.21.0-0" type: application -version: 0.12.0-SNAPSHOT +version: 0.12.1 sources: - https://github.com/elastic/cloud-on-k8s - https://github.com/elastic/kibana diff --git a/deploy/eck-stack/charts/eck-kibana/examples/http-configuration.yaml b/deploy/eck-stack/charts/eck-kibana/examples/http-configuration.yaml index d0ae4a112c..007553e17f 100644 --- a/deploy/eck-stack/charts/eck-kibana/examples/http-configuration.yaml +++ b/deploy/eck-stack/charts/eck-kibana/examples/http-configuration.yaml @@ -1,7 +1,7 @@ --- # Version of Kibana. # -version: 8.15.0-SNAPSHOT +version: 8.15.0 # Labels that will be applied to Kibana. # diff --git a/deploy/eck-stack/charts/eck-kibana/templates/ingress.yaml b/deploy/eck-stack/charts/eck-kibana/templates/ingress.yaml index 0fa5705e5a..171463c033 100644 --- a/deploy/eck-stack/charts/eck-kibana/templates/ingress.yaml +++ b/deploy/eck-stack/charts/eck-kibana/templates/ingress.yaml @@ -19,7 +19,7 @@ spec: {{- if .Values.ingress.className }} ingressClassName: {{ .Values.ingress.className | quote }} {{- end }} - {{- if .Values.ingress.tls }} + {{- if .Values.ingress.tls.enabled }} tls: - hosts: {{- range .Values.ingress.hosts }} diff --git a/deploy/eck-stack/charts/eck-kibana/templates/tests/ingress_test.yaml b/deploy/eck-stack/charts/eck-kibana/templates/tests/ingress_test.yaml index 9ac56195a1..e781b37950 100644 --- a/deploy/eck-stack/charts/eck-kibana/templates/tests/ingress_test.yaml +++ b/deploy/eck-stack/charts/eck-kibana/templates/tests/ingress_test.yaml @@ -56,7 +56,7 @@ tests: app.kubernetes.io/instance: quickstart app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-kibana - helm.sh/chart: eck-kibana-0.12.0-SNAPSHOT + helm.sh/chart: eck-kibana-0.12.1 test: label - equal: path: metadata.annotations diff --git a/deploy/eck-stack/charts/eck-kibana/templates/tests/kibana_test.yaml b/deploy/eck-stack/charts/eck-kibana/templates/tests/kibana_test.yaml index 427cf98243..c780aed975 100644 --- a/deploy/eck-stack/charts/eck-kibana/templates/tests/kibana_test.yaml +++ b/deploy/eck-stack/charts/eck-kibana/templates/tests/kibana_test.yaml @@ -13,7 +13,7 @@ tests: value: quickstart-eck-kibana - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - it: name override should work properly set: nameOverride: override @@ -53,7 +53,7 @@ tests: app.kubernetes.io/instance: quickstart app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-kibana - helm.sh/chart: eck-kibana-0.12.0-SNAPSHOT + helm.sh/chart: eck-kibana-0.12.1 test: label - equal: path: metadata.annotations @@ -75,7 +75,7 @@ tests: value: quickstart-eck-kibana - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - equal: path: 
spec.count value: 1 diff --git a/deploy/eck-stack/charts/eck-kibana/values.yaml b/deploy/eck-stack/charts/eck-kibana/values.yaml index 4d22373244..2135b25daf 100644 --- a/deploy/eck-stack/charts/eck-kibana/values.yaml +++ b/deploy/eck-stack/charts/eck-kibana/values.yaml @@ -18,7 +18,7 @@ # Version of Kibana. # -version: 8.15.0-SNAPSHOT +version: 8.15.0 # Labels that will be applied to Kibana. # diff --git a/deploy/eck-stack/charts/eck-logstash/Chart.yaml b/deploy/eck-stack/charts/eck-logstash/Chart.yaml index 6d7a54dbfc..31c908b94a 100644 --- a/deploy/eck-stack/charts/eck-logstash/Chart.yaml +++ b/deploy/eck-stack/charts/eck-logstash/Chart.yaml @@ -3,7 +3,7 @@ name: eck-logstash description: Logstash managed by the ECK operator kubeVersion: ">= 1.21.0-0" type: application -version: 0.12.0-SNAPSHOT +version: 0.12.1 sources: - https://github.com/elastic/cloud-on-k8s - https://github.com/elastic/logstash diff --git a/deploy/eck-stack/charts/eck-logstash/examples/basic-eck.yaml b/deploy/eck-stack/charts/eck-logstash/examples/basic-eck.yaml index 3a341684da..c35665373f 100644 --- a/deploy/eck-stack/charts/eck-logstash/examples/basic-eck.yaml +++ b/deploy/eck-stack/charts/eck-logstash/examples/basic-eck.yaml @@ -1,6 +1,6 @@ --- # values corresponding to config/recipes/logstash/logstash-eck.yaml -version: 8.15.0-SNAPSHOT +version: 8.15.0 elasticsearchRefs: - clusterName: eck diff --git a/deploy/eck-stack/charts/eck-logstash/examples/es-role.yaml b/deploy/eck-stack/charts/eck-logstash/examples/es-role.yaml index 04abd9c14b..ed03d746d7 100644 --- a/deploy/eck-stack/charts/eck-logstash/examples/es-role.yaml +++ b/deploy/eck-stack/charts/eck-logstash/examples/es-role.yaml @@ -1,6 +1,6 @@ --- # values corresponding to config/recipes/logstash/logstash-es-role.yaml -version: 8.15.0-SNAPSHOT +version: 8.15.0 elasticsearchRefs: - clusterName: eck diff --git a/deploy/eck-stack/charts/eck-logstash/examples/monitored.yaml b/deploy/eck-stack/charts/eck-logstash/examples/monitored.yaml index 8b9f92dbb6..235260425f 100644 --- a/deploy/eck-stack/charts/eck-logstash/examples/monitored.yaml +++ b/deploy/eck-stack/charts/eck-logstash/examples/monitored.yaml @@ -1,6 +1,6 @@ --- # values corresponding to config/recipes/logstash/logstash-monitored.yaml -version: 8.15.0-SNAPSHOT +version: 8.15.0 monitoring: metrics: diff --git a/deploy/eck-stack/charts/eck-logstash/examples/multi.yaml b/deploy/eck-stack/charts/eck-logstash/examples/multi.yaml index 1bdc40e5fb..74814998ea 100644 --- a/deploy/eck-stack/charts/eck-logstash/examples/multi.yaml +++ b/deploy/eck-stack/charts/eck-logstash/examples/multi.yaml @@ -1,6 +1,6 @@ --- # values corresponding to config/recipes/logstash/logstash-multi.yaml -version: 8.15.0-SNAPSHOT +version: 8.15.0 pipelines: - pipeline.id: main diff --git a/deploy/eck-stack/charts/eck-logstash/examples/volumes.yaml b/deploy/eck-stack/charts/eck-logstash/examples/volumes.yaml index 5491263bc2..f0e1da3308 100644 --- a/deploy/eck-stack/charts/eck-logstash/examples/volumes.yaml +++ b/deploy/eck-stack/charts/eck-logstash/examples/volumes.yaml @@ -1,6 +1,6 @@ --- # values corresponding to config/recipes/logstash/logstash-volumes.yaml -version: 8.15.0-SNAPSHOT +version: 8.15.0 config: log.level: info diff --git a/deploy/eck-stack/charts/eck-logstash/templates/tests/logstash_test.yaml b/deploy/eck-stack/charts/eck-logstash/templates/tests/logstash_test.yaml index 39b0d1aad3..1e26c8ae1e 100644 --- a/deploy/eck-stack/charts/eck-logstash/templates/tests/logstash_test.yaml +++ 
b/deploy/eck-stack/charts/eck-logstash/templates/tests/logstash_test.yaml @@ -101,7 +101,7 @@ tests: app.kubernetes.io/instance: quickstart app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: eck-logstash - helm.sh/chart: eck-logstash-0.12.0-SNAPSHOT + helm.sh/chart: eck-logstash-0.12.1 test: label - equal: path: metadata.annotations @@ -160,9 +160,8 @@ tests: - it: should render pipelinesRef properly set: pipelinesRef: - secretRef: - secretName: pipelineLogstashSecretName + secretName: pipelineLogstashSecretName asserts: - equal: - path: spec.pipelinesRef.secretRef.secretName + path: spec.pipelinesRef.secretName value: pipelineLogstashSecretName diff --git a/deploy/eck-stack/charts/eck-logstash/values.yaml b/deploy/eck-stack/charts/eck-logstash/values.yaml index b77d347911..35125203c2 100644 --- a/deploy/eck-stack/charts/eck-logstash/values.yaml +++ b/deploy/eck-stack/charts/eck-logstash/values.yaml @@ -18,7 +18,7 @@ # Version of Logstash. # -version: 8.15.0-SNAPSHOT +version: 8.15.0 # Logstash Docker image to deploy # @@ -53,7 +53,10 @@ count: 1 config: {} configRef: {} -# secretRef: + +# Reference a pipeline configuration in a Secret. +# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-logstash-configuration.html#k8s-logstash-pipelines +#pipelinesRef: # secretName: '' # Set podTemplate to customize the pod used by Logstash diff --git a/deploy/eck-stack/examples/beats/metricbeat_hosts.yaml b/deploy/eck-stack/examples/beats/metricbeat_hosts.yaml index d2cafcff69..130978eddb 100644 --- a/deploy/eck-stack/examples/beats/metricbeat_hosts.yaml +++ b/deploy/eck-stack/examples/beats/metricbeat_hosts.yaml @@ -7,7 +7,7 @@ eck-elasticsearch: # Version of Elasticsearch. # - version: 8.15.0-SNAPSHOT + version: 8.15.0 nodeSets: - name: default @@ -41,7 +41,7 @@ eck-kibana: # Version of Kibana. # - version: 8.15.0-SNAPSHOT + version: 8.15.0 spec: # Count of Kibana replicas to create. @@ -58,7 +58,7 @@ eck-beats: name: metricbeat spec: type: metricbeat - version: 8.15.0-SNAPSHOT + version: 8.15.0 elasticsearchRef: name: quickstart kibanaRef: diff --git a/deploy/eck-stack/examples/custom-elasticsearch-kibana.yaml b/deploy/eck-stack/examples/custom-elasticsearch-kibana.yaml index df4d5d8a68..ebad06c900 100644 --- a/deploy/eck-stack/examples/custom-elasticsearch-kibana.yaml +++ b/deploy/eck-stack/examples/custom-elasticsearch-kibana.yaml @@ -6,7 +6,7 @@ eck-elasticsearch: # Version of Elasticsearch. # - version: 8.15.0-SNAPSHOT + version: 8.15.0 nodeSets: - name: default @@ -38,7 +38,7 @@ eck-kibana: # Version of Kibana. # - version: 8.15.0-SNAPSHOT + version: 8.15.0 spec: # Count of Kibana replicas to create. 
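The chart bumps above pin the eck-stack Helm charts to 0.12.1 and the bundled example stack versions to 8.15.0. As a quick way to verify what a released chart of this kind renders, something like the following can be used; this is a sketch that assumes the charts are published to the standard Elastic Helm repository at https://helm.elastic.co and uses a hypothetical release name:

[source,sh]
----
# Add the Elastic Helm repository and refresh the index.
helm repo add elastic https://helm.elastic.co
helm repo update

# Render the 0.12.1 chart locally without installing it, and confirm
# that the generated manifests carry the pinned 8.15.0 stack version.
helm template quickstart elastic/eck-stack --version 0.12.1 | grep 'version: 8.15.0'
----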
diff --git a/deploy/eck-stack/templates/tests/beats_test.yaml b/deploy/eck-stack/templates/tests/beats_test.yaml index 8f28c71a26..ac7abdb85f 100644 --- a/deploy/eck-stack/templates/tests/beats_test.yaml +++ b/deploy/eck-stack/templates/tests/beats_test.yaml @@ -19,7 +19,7 @@ tests: value: quickstart-eck-beats - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - it: should render custom metricbeat example properly values: - ../../examples/beats/metricbeat_hosts.yaml @@ -33,7 +33,7 @@ tests: value: quickstart-eck-beats - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - equal: path: spec.kibanaRef.name value: quickstart diff --git a/deploy/eck-stack/templates/tests/elastic-agent_test.yaml b/deploy/eck-stack/templates/tests/elastic-agent_test.yaml index b6758d108d..81ef1968d2 100644 --- a/deploy/eck-stack/templates/tests/elastic-agent_test.yaml +++ b/deploy/eck-stack/templates/tests/elastic-agent_test.yaml @@ -18,7 +18,7 @@ tests: value: quickstart-eck-agent - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - it: should render agent in custom fleet example properly values: - ../../examples/agent/fleet-agents.yaml @@ -32,7 +32,7 @@ tests: value: quickstart-eck-agent - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - equal: path: spec.kibanaRef.name value: kibana @@ -75,7 +75,7 @@ tests: value: quickstart-eck-fleet-server - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - it: should render fleet server in custom fleet example properly values: - ../../examples/agent/fleet-agents.yaml @@ -89,7 +89,7 @@ tests: value: fleet-server - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - equal: path: spec.kibanaRef.name value: kibana diff --git a/deploy/eck-stack/templates/tests/elasticsearch_test.yaml b/deploy/eck-stack/templates/tests/elasticsearch_test.yaml index e559d5612d..22dd600a36 100644 --- a/deploy/eck-stack/templates/tests/elasticsearch_test.yaml +++ b/deploy/eck-stack/templates/tests/elasticsearch_test.yaml @@ -4,7 +4,7 @@ templates: tests: - it: should render quickstart properly set: - eck-elasticsearch.version: 8.15.0-SNAPSHOT + eck-elasticsearch.version: 8.15.0 eck-kibana.enabled: false release: name: quickstart @@ -16,7 +16,7 @@ tests: value: elasticsearch - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - it: name override should work properly set: eck-elasticsearch.nameOverride: override diff --git a/deploy/eck-stack/templates/tests/kibana_test.yaml b/deploy/eck-stack/templates/tests/kibana_test.yaml index d071a11491..a6d07e5e5f 100644 --- a/deploy/eck-stack/templates/tests/kibana_test.yaml +++ b/deploy/eck-stack/templates/tests/kibana_test.yaml @@ -13,7 +13,7 @@ tests: value: quickstart-eck-kibana - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - it: name override should work properly set: eck-kibana.nameOverride: override @@ -51,7 +51,7 @@ tests: value: quickstart - equal: path: spec.version - value: 8.15.0-SNAPSHOT + value: 8.15.0 - equal: path: spec.count value: 1 diff --git a/docs/advanced-topics/openshift.asciidoc b/docs/advanced-topics/openshift.asciidoc index 645588331b..6d6d372bbb 100644 --- a/docs/advanced-topics/openshift.asciidoc +++ b/docs/advanced-topics/openshift.asciidoc @@ -138,7 +138,7 @@ spec: memory: 1Gi cpu: 1 --- -apiVersion: v1 +apiVersion: route.openshift.io/v1 kind: Route metadata: name: kibana-sample @@ -200,7 +200,7 @@ spec: spec: serviceAccountName: apm-server --- -apiVersion: v1 +apiVersion: 
route.openshift.io/v1 kind: Route metadata: name: apm-server-sample
diff --git a/docs/eck-attributes.asciidoc b/docs/eck-attributes.asciidoc index 2e0f5a6b8b..26a0d02d7a 100644 --- a/docs/eck-attributes.asciidoc +++ b/docs/eck-attributes.asciidoc @@ -1,5 +1,7 @@ -:eck_version: 2.13.0 +:eck_version: 2.14.0 :eck_crd_version: v1 -:eck_release_branch: 2.13 +:eck_release_branch: 2.14 :eck_github: https://github.com/elastic/cloud-on-k8s :eck_resources_list: Elasticsearch, Kibana, APM Server, Enterprise Search, Beats, Elastic Agent, Elastic Maps Server, and Logstash + +:role_mappings_warning: We have identified an issue with Elasticsearch 8.15.1 and 8.15.2 that prevents security role mappings configured via Stack configuration policies from working correctly. Avoid these versions and upgrade to 8.16.0 to remedy this issue if you are affected.
diff --git a/docs/operating-eck/eck-permissions.asciidoc b/docs/operating-eck/eck-permissions.asciidoc index 19242d0e69..8d66a9b1eb 100644 --- a/docs/operating-eck/eck-permissions.asciidoc +++ b/docs/operating-eck/eck-permissions.asciidoc @@ -61,7 +61,7 @@ These permissions are needed by the Service Account that ECK operator runs as. |Pod||no|Assuring expected Pods presence during Elasticsearch reconciliation, safely deleting Pods during configuration changes and validating `podTemplate` by dry-run creation of Pods. |Endpoint||no|Checking availability of service endpoints. |Event||no|Emitting events concerning reconciliation progress and issues. -|PersistentVolumeClaim||no|Expanding existing volumes. Check link:https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html#k8s_updating_the_volume_claim_settings[docs] to learn more. +|PersistentVolumeClaim||no|Expanding existing volumes. Check link:https://www.elastic.co/guide/en/cloud-on-k8s/{eck_release_branch}/k8s-volume-claim-templates.html#k8s_updating_the_volume_claim_settings[docs] to learn more. |Secret||no|Reading/writing configuration, passwords, certificates, and so on. |Service||no|Creating Services fronting Elastic Stack applications. |ConfigMap||no|Reading/writing configuration. @@ -69,7 +69,7 @@ These permissions are needed by the Service Account that ECK operator runs as. |Deployment|apps|no|Deploying Kibana, APM Server, EnterpriseSearch, Maps, Beats or Elastic Agent. |DaemonSet|apps|no|Deploying Beats or Elastic Agent. |PodDisruptionBudget|policy|no|Ensuring update safety for Elasticsearch. Check link:https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-pod-disruption-budget.html[docs] to learn more. -|StorageClass|storage.k8s.io|yes|Validating storage expansion support. Check link:https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html#k8s_updating_the_volume_claim_settings[docs] to learn more. +|StorageClass|storage.k8s.io|yes|Validating storage expansion support. Check link:https://www.elastic.co/guide/en/cloud-on-k8s/{eck_release_branch}/k8s-volume-claim-templates.html#k8s_updating_the_volume_claim_settings[docs] to learn more. |coreauthorization.k8s.io|SubjectAccessReview|yes|Controlling access between referenced resources. Check link:https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-restrict-cross-namespace-associations.html[docs] to learn more.
|=== diff --git a/docs/operating-eck/licensing.asciidoc b/docs/operating-eck/licensing.asciidoc index c8e4fc8e61..91d8a881bf 100644 --- a/docs/operating-eck/licensing.asciidoc +++ b/docs/operating-eck/licensing.asciidoc @@ -105,13 +105,23 @@ The operator periodically writes the total amount of Elastic resources under man ---- > kubectl -n elastic-system get configmap elastic-licensing -o json | jq .data { + "apm_memory": "0.50GiB", + "apm_memory_bytes": "536870912", + "eck_license_expiry_date": "2025-01-01T00:59:59+01:00", "eck_license_level": "enterprise", - "eck_license_expiry_date": "2022-01-01T00:59:59+01:00", + "elasticsearch_memory": "18.00GiB", + "elasticsearch_memory_bytes": "19327352832", "enterprise_resource_units": "1", - "max_enterprise_resource_units": "10", - "timestamp": "2020-01-03T23:38:20Z", - "total_managed_memory": "64GiB", - "total_managed_memory_bytes": "68719476736" + "enterprise_search_memory": "4.00GiB", + "enterprise_search_memory_bytes": "4294967296", + "kibana_memory": "1.00GiB", + "kibana_memory_bytes": "1073741824", + "logstash_memory": "2.00GiB", + "logstash_memory_bytes": "2147483648", + "max_enterprise_resource_units": "250", + "timestamp": "2024-07-26T12:40:42+02:00", + "total_managed_memory": "25.50GiB", + "total_managed_memory_bytes": "27380416512" } ---- @@ -120,12 +130,30 @@ If the operator metrics endpoint is enabled with the `--metrics-port` flag (chec [source,shell] ---- > curl "$ECK_METRICS_ENDPOINT" | grep elastic_licensing +# HELP elastic_licensing_enterprise_resource_units_max Maximum number of enterprise resource units available +# TYPE elastic_licensing_enterprise_resource_units_max gauge +elastic_licensing_enterprise_resource_units_max{license_level="enterprise"} 250 # HELP elastic_licensing_enterprise_resource_units_total Total enterprise resource units used # TYPE elastic_licensing_enterprise_resource_units_total gauge -elastic_licensing_enterprise_resource_units_total{license_level="basic"} 6 -# HELP elastic_licensing_memory_gigabytes_total Total memory used in GB -# TYPE elastic_licensing_memory_gigabytes_total gauge -elastic_licensing_memory_gigabytes_total{license_level="basic"} 357.01915648 +elastic_licensing_enterprise_resource_units_total{license_level="enterprise"} 1 +# HELP elastic_licensing_memory_gibibytes_apm Memory used by APM server in GiB +# TYPE elastic_licensing_memory_gibibytes_apm gauge +elastic_licensing_memory_gibibytes_apm{license_level="enterprise"} 0.5 +# HELP elastic_licensing_memory_gibibytes_elasticsearch Memory used by Elasticsearch in GiB +# TYPE elastic_licensing_memory_gibibytes_elasticsearch gauge +elastic_licensing_memory_gibibytes_elasticsearch{license_level="enterprise"} 18 +# HELP elastic_licensing_memory_gibibytes_enterprise_search Memory used by Enterprise Search in GiB +# TYPE elastic_licensing_memory_gibibytes_enterprise_search gauge +elastic_licensing_memory_gibibytes_enterprise_search{license_level="enterprise"} 4 +# HELP elastic_licensing_memory_gibibytes_kibana Memory used by Kibana in GiB +# TYPE elastic_licensing_memory_gibibytes_kibana gauge +elastic_licensing_memory_gibibytes_kibana{license_level="enterprise"} 1 +# HELP elastic_licensing_memory_gibibytes_logstash Memory used by Logstash in GiB +# TYPE elastic_licensing_memory_gibibytes_logstash gauge +elastic_licensing_memory_gibibytes_logstash{license_level="enterprise"} 2 +# HELP elastic_licensing_memory_gibibytes_total Total memory used in GiB +# TYPE elastic_licensing_memory_gibibytes_total gauge 
+elastic_licensing_memory_gibibytes_total{license_level="enterprise"} 25.5 ---- NOTE: Logstash resources managed by ECK will be counted towards ERU usage for informational purposes. Billable consumption depends on license terms on a per customer basis (See link:https://www.elastic.co/agreements/global/self-managed[Self Managed Subscription Agreement])
diff --git a/docs/operating-eck/troubleshooting/common-problems.asciidoc b/docs/operating-eck/troubleshooting/common-problems.asciidoc index 16842157bb..b82052da5e 100644 --- a/docs/operating-eck/troubleshooting/common-problems.asciidoc +++ b/docs/operating-eck/troubleshooting/common-problems.asciidoc @@ -260,3 +260,65 @@ If you accidentally upgrade one of your Elasticsearch clusters to a version that The reason for this validation is that ECK will not allow downgrades as this is not supported by Elasticsearch and once the data directory of Elasticsearch has been upgraded there is no way back to the old version without a link:https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html[snapshot restore]. These two upgrading scenarios, however, are exceptions because Elasticsearch never started up successfully. If you annotate the Elasticsearch resource with `eck.k8s.elastic.co/disable-downgrade-validation=true` ECK allows you to go back to the old version at your own risk. If you also attempted an upgrade of other related Elastic Stack applications at the same time you can use the same annotation to go back. Remove the annotation afterwards to prevent accidental downgrades and reduced availability. + +[id="{p}-{page_id}-815-reconfigure-role-mappings"] +== Reconfigure stack config policy-based role mappings after an upgrade to 8.15.3 from 8.14.x or 8.15.x + +This procedure applies if you have role mappings defined in a StackConfigPolicy and you upgraded from 8.14.x or 8.15.x to 8.15.3. + +Example upgrade paths: +- 8.14.2 -> 8.15.2 -> 8.15.3 +- 8.14.2 -> 8.15.3 +- 8.15.2 -> 8.15.3 + +The best option is to upgrade to 8.16.0, which fixes the problem automatically. If this is not possible and you are stuck on 8.15.3, you have to perform two manual steps to reconfigure the role mappings correctly, because a bug caused the role mappings to be duplicated. + +. Force reload the StackConfigPolicy configuration + +Force reload the StackConfigPolicy configuration containing the role mappings definition by adding metadata to any of the mappings: + +[source,yaml] +---- +apiVersion: stackconfigpolicy.k8s.elastic.co/v1alpha1 +kind: StackConfigPolicy +spec: + elasticsearch: + securityRoleMappings: + <role mapping name>: + metadata: + force_reload: anything # add dummy metadata to force reload the config +---- + +Check that the role mapping is now in the cluster state: + +[source,sh] +---- +GET /_cluster/state/metadata?filter_path=metadata.role_mappings.role_mappings +{"metadata":{"role_mappings":{"role_mappings":[{"enabled":true,"roles":["superuser"],"rules":{"all":[{"field":{"realm.name":"oidc1"}},{"field":{"username":"*"}}]},"metadata":{"force_reload":"anything"}}]}}} +---- + . Remove duplicated role mappings exposed via the API + +Start by listing all the role mappings defined in your StackConfigPolicy: + +[source,sh] +---- +kubectl get scp -o json | jq '.spec.elasticsearch.securityRoleMappings | to_entries[].key' -r + +---- + +Delete each role mapping: + +[source,sh] +---- +DELETE /_security/role_mapping/<role mapping name> +{"found": true} +---- + +Check that the role mapping was deleted: + +[source,sh] +---- +GET /_security/role_mapping/<role mapping name> +{} +---- \ No newline at end of file
diff --git a/docs/operating-eck/upgrading-eck.asciidoc b/docs/operating-eck/upgrading-eck.asciidoc index 0992fc93e9..e41250a996 100644 --- a/docs/operating-eck/upgrading-eck.asciidoc +++ b/docs/operating-eck/upgrading-eck.asciidoc @@ -99,7 +99,7 @@ This will update the ECK installation to the latest binary and update the CRDs a Upgrading the operator results in a one-time update to existing managed resources in the cluster. This potentially triggers a rolling restart of pods by Kubernetes to apply those changes. The following list contains the ECK operator versions that would cause a rolling restart after they have been installed. - 1.6, 1.9, 2.0, 2.1, 2.2, 2.4, 2.5, 2.6, 2.7, 2.8 + 1.6, 1.9, 2.0, 2.1, 2.2, 2.4, 2.5, 2.6, 2.7, 2.8, 2.14 NOTE: Stepping over one of these versions, for example, upgrading ECK from 2.6 to 2.9, still triggers a rolling restart.
diff --git a/docs/orchestrating-elastic-stack-applications/agent-standalone.asciidoc b/docs/orchestrating-elastic-stack-applications/agent-standalone.asciidoc index 8e96693563..d497591497 100644 --- a/docs/orchestrating-elastic-stack-applications/agent-standalone.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/agent-standalone.asciidoc @@ -273,7 +273,7 @@ The `elasticsearchRefs` element allows ECK to automatically configure Elastic Ag [id="{p}-elastic-agent-set-output"] === Set manually Elastic Agent outputs -If the `elasticsearchRefs` element is specified, ECK populates the outputs section of the Elastic Agent configuration. ECK creates a user with appropriate roles and permissions and uses its credentials. If required, it also mounts the CA certificate in all Agent Pods, and recreates Pods when this certificate changes. Moreover, `elasticsearchRef` element can refer to an ECK-managed Elasticsearch cluster by filling the `name`, `namespace`, `serviceName` fields accordingly, as well as to a Kubernetes secret that contains the connection information to an Elasticsearch cluster not managed by it. In the latter case, for authenticating against the Elasticsearch cluster the secret must contain the fields of `url` and either the `username` with `password` or the `api-key`. +If the `elasticsearchRefs` element is specified, ECK populates the outputs section of the Elastic Agent configuration. ECK creates a user with appropriate roles and permissions and uses its credentials. If required, it also mounts the CA certificate in all Agent Pods, and recreates Pods when this certificate changes. Moreover, the `elasticsearchRef` element can refer to an ECK-managed Elasticsearch cluster by filling the `name`, `namespace`, `serviceName` fields accordingly, as well as to a Kubernetes secret that contains the connection information to an Elasticsearch cluster not managed by it. In the latter case, to authenticate against the Elasticsearch cluster, the secret must contain the `url` field and either the `username` with `password` or the `api-key`. Refer to <<{p}-connect-to-unmanaged-resources>> for additional details. The outputs can also be set manually.
To do that, remove the `elasticsearchRefs` element from the specification and include an appropriate output configuration in the `config`, or indirectly through the `configRef` mechanism. diff --git a/docs/orchestrating-elastic-stack-applications/connect-to-unmanaged-resources.asciidoc b/docs/orchestrating-elastic-stack-applications/connect-to-unmanaged-resources.asciidoc new file mode 100644 index 0000000000..5ebfbb96e5 --- /dev/null +++ b/docs/orchestrating-elastic-stack-applications/connect-to-unmanaged-resources.asciidoc @@ -0,0 +1,84 @@ +:page_id: connect-to-unmanaged-resources +ifdef::env-github[] +**** +link:https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-{page_id}.html[View this document on the Elastic website] +**** +endif::[] +[id="{p}-{page_id}"] += Connect to external Elastic resources + +Fields like `elasticsearchRef` or `kibanaRef` are useful to automatically establish connections between applications managed by the same ECK operator instance. It is however also possible to connect to applications managed by a different ECK operator instance, or to applications not managed by ECK, for example an Elastic Cloud deployment. This can be done by providing connection details and credentials in a `Secret` through the `secretName` attribute: + +[source,yaml,subs="attributes"] +---- +apiVersion: v1 +kind: Secret +metadata: + name: external-es-ref +stringData: + url: https://sample.gcp.elastic-cloud.com + username: "elastic" + password: REDACTED +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana-sample +spec: + version: 8.14.0 + count: 1 + elasticsearchRef: + secretName: external-es-ref +---- + +In the case of Elastic Agent you can also specify several named references: + +[source,yaml,subs="attributes"] +---- +apiVersion: v1 +kind: Secret +metadata: + name: external-es-ref +stringData: + url: https://abcd-42.xyz.elastic-cloud.com:443 + username: "" + password: "" + api-key: REDACTED + ca.crt: REDACTED +--- +apiVersion: v1 +kind: Secret +metadata: + name: external-es-ref2 +stringData: + url: https://efgh-43.xyz.elastic-cloud.com:443 + username: "" + password: "" + api-key: REDACTED + ca.crt: REDACTED +--- +apiVersion: agent.k8s.elastic.co/v1alpha1 +kind: Agent +metadata: + name: quickstart +spec: + version: {version} + elasticsearchRefs: + - outputName: default + secretName: external-es-ref + - outputName: monitoring + secretName: external-es-ref2 +---- + +The following fields are expected to be set in the referenced `Secret`: + +* `url` (required): URL to be used to access the external resource. +* `username` (required): The username of the user to be authenticated to the Elastic resource. +* `password` (required): The password for the provided user. +* `ca.crt` (optional): The certificate authority to be used to connect to the external resource. + +In the case of Agent and Beats resources the following field can also be used to connect to Elasticsearch: + +* `api-key`: An API key to authenticate against the Elastic resource. + +NOTE: The operator must be able to connect to the external resources to check version compatibility. 
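The new page above wires external connections through a plain `Secret`. For reference, a Secret equivalent to the first YAML example can also be created imperatively; a minimal sketch with placeholder credentials:

[source,sh]
----
# Create the connection Secret that Kibana consumes through
# `elasticsearchRef.secretName: external-es-ref`.
kubectl create secret generic external-es-ref \
  --from-literal=url=https://sample.gcp.elastic-cloud.com \
  --from-literal=username=elastic \
  --from-literal=password=REDACTED
----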
diff --git a/docs/orchestrating-elastic-stack-applications/elasticsearch/readiness.asciidoc b/docs/orchestrating-elastic-stack-applications/elasticsearch/readiness.asciidoc index fd54a39f0c..d06201896b 100644 --- a/docs/orchestrating-elastic-stack-applications/elasticsearch/readiness.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/elasticsearch/readiness.asciidoc @@ -8,6 +8,8 @@ endif::[] [id="{p}-{page_id}"] = Readiness probe +== Elasticsearch versions before 8.2.0 + By default, the readiness probe checks that the Pod responds to HTTP requests within a timeout of three seconds. This is acceptable in most cases. However, when the cluster is under heavy load, you might need to increase the timeout. This allows the Pod to stay in a `Ready` state and be part of the Elasticsearch service even if it is responding slowly. To adjust the timeout, set the `READINESS_PROBE_TIMEOUT` environment variable in the Pod template and update the readiness probe configuration with the new timeout. This example describes how to increase the API call timeout to ten seconds and the overall check time to twelve seconds: @@ -42,3 +44,7 @@ spec: ---- Note that this requires restarting the Pods. + +== Elasticsearch versions 8.2.0 and later + +We do not recommend overriding the default readiness probe on Elasticsearch 8.2.0 and later. ECK configures a socket-based readiness probe using the Elasticsearch link:https://www.elastic.co/guide/en/elasticsearch/reference/8.19/modules-network.html#readiness-tcp-port[readiness port feature], which is not influenced by the load on the Elasticsearch cluster. \ No newline at end of file
diff --git a/docs/orchestrating-elastic-stack-applications/elasticsearch/transport-settings.asciidoc b/docs/orchestrating-elastic-stack-applications/elasticsearch/transport-settings.asciidoc index f47717c0be..67b802e6ba 100644 --- a/docs/orchestrating-elastic-stack-applications/elasticsearch/transport-settings.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/elasticsearch/transport-settings.asciidoc @@ -171,4 +171,6 @@ spec: secretName: root-ca-secret ... ---- -<1> This example, which is meant for illustration purposes only, uses a self-signed issuer as for the root CA and second issuer for the Elasticsearch cluster transport certificates as the cert-manager CSI driver does not support self-signed CAs. \ No newline at end of file +<1> This example uses a self-signed issuer for the root CA and a second issuer for the Elasticsearch cluster transport certificates, as the cert-manager CSI driver does not support self-signed CAs. + +When transitioning from a configuration that uses externally provisioned certificates back to ECK-managed self-signed transport certificates, it is important to ensure that the externally provisioned CA remains configured as a trusted CA through the `.spec.transport.tls.certificateAuthorities` attribute until all nodes in the cluster have been updated to use the ECK-managed certificates. When transitioning from ECK-managed certificates to externally provisioned ones, ECK automatically ensures that the ECK CA remains configured until the transition has been completed.
\ No newline at end of file
diff --git a/docs/orchestrating-elastic-stack-applications/enterprise-search.asciidoc b/docs/orchestrating-elastic-stack-applications/enterprise-search.asciidoc index 70eaa23ca3..af45b4d633 100644 --- a/docs/orchestrating-elastic-stack-applications/enterprise-search.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/enterprise-search.asciidoc @@ -333,7 +333,13 @@ The `elasticsearchRef` element allows ECK to automatically configure Enterprise [id="{p}-enterprise-search-connect-non-eck-es"] === Connect to an external Elasticsearch cluster -If you do not want to use the `elasticsearchRef` mechanism or if you want to connect to an Elasticsearch cluster not managed by ECK, you can manually configure Enterprise Search to access any available Elasticsearch cluster: + +==== Automatically + +Refer to <<{p}-connect-to-unmanaged-resources>> to automatically configure Enterprise Search using connection settings from a `Secret`. + +==== Manually +If you do not want to use the `elasticsearchRef` mechanism, you can manually configure Enterprise Search to access any available Elasticsearch cluster: [source,yaml,subs="attributes,+macros"] ----
diff --git a/docs/orchestrating-elastic-stack-applications/kibana.asciidoc b/docs/orchestrating-elastic-stack-applications/kibana.asciidoc index 24066c48b5..77d01aa739 100644 --- a/docs/orchestrating-elastic-stack-applications/kibana.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/kibana.asciidoc @@ -61,6 +61,12 @@ The Kibana configuration file is automatically setup by ECK to establish a secur You can also configure Kibana to connect to an Elasticsearch cluster that is managed by a different installation of ECK, or runs outside the Kubernetes cluster. In this case, you need the IP address or URL of the Elasticsearch cluster and a valid username and password pair to access the cluster. +=== Using a Secret + +Refer to <<{p}-connect-to-unmanaged-resources>> to automatically configure Kibana using connection settings from a `Secret`.
+ +=== Using secure settings + Use the <<{p}-kibana-secure-settings,secure settings>> mechanism to securely store the credentials of the external Elasticsearch cluster: [source,shell] diff --git a/docs/orchestrating-elastic-stack-applications/orchestrating-elastic-stack-applications.asciidoc b/docs/orchestrating-elastic-stack-applications/orchestrating-elastic-stack-applications.asciidoc index 103430e726..8b2fbb0cf6 100644 --- a/docs/orchestrating-elastic-stack-applications/orchestrating-elastic-stack-applications.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/orchestrating-elastic-stack-applications.asciidoc @@ -27,6 +27,7 @@ endif::[] - <<{p}-stateless-autoscaling>> - <<{p}-stack-config-policy>> - <<{p}-upgrading-stack>> +- <<{p}-connect-to-unmanaged-resources>> -- @@ -48,4 +49,4 @@ include::managing-compute-resources.asciidoc[leveloffset=+1] include::autoscaling.asciidoc[leveloffset=+1] include::stack-config-policy.asciidoc[leveloffset=+1] include::upgrading-stack.asciidoc[leveloffset=+1] - +include::connect-to-unmanaged-resources.asciidoc[leveloffset=+1] diff --git a/docs/orchestrating-elastic-stack-applications/security/auth-configs-using-stack-config-policy.asciidoc b/docs/orchestrating-elastic-stack-applications/security/auth-configs-using-stack-config-policy.asciidoc index e7a88ba06c..0bab9938d5 100644 --- a/docs/orchestrating-elastic-stack-applications/security/auth-configs-using-stack-config-policy.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/security/auth-configs-using-stack-config-policy.asciidoc @@ -7,6 +7,8 @@ endif::[] [id="{p}-{page_id}"] = Managing authentication for multiple stacks using Elastic Stack configuration policy +CAUTION: {role_mappings_warning} + NOTE: This requires a valid Enterprise license or Enterprise trial license. Check <<{p}-licensing,the license documentation>> for more details about managing licenses. diff --git a/docs/orchestrating-elastic-stack-applications/security/managing-authentication-for-multiple-stacks/jwt-stack-config-policy.asciidoc b/docs/orchestrating-elastic-stack-applications/security/managing-authentication-for-multiple-stacks/jwt-stack-config-policy.asciidoc index 70b9bd88d5..9668f9038b 100644 --- a/docs/orchestrating-elastic-stack-applications/security/managing-authentication-for-multiple-stacks/jwt-stack-config-policy.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/security/managing-authentication-for-multiple-stacks/jwt-stack-config-policy.asciidoc @@ -9,6 +9,8 @@ endif::[] = JWT using Elastic Stack configuration policy +CAUTION: {role_mappings_warning} + NOTE: This requires a valid Enterprise license or Enterprise trial license. Check <<{p}-licensing,the license documentation>> for more details about managing licenses. TIP: Make sure you check the complete link:https://www.elastic.co/guide/en/elasticsearch/reference/current/jwt-auth-realm.html[guide to setting up JWT with Elasticsearch]. 
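To make the shape of such a policy concrete, here is a minimal, hypothetical sketch of a `StackConfigPolicy` declaring a JWT realm; the realm name, selector label, issuer, and audience values are illustrative only:

[source,yaml]
----
apiVersion: stackconfigpolicy.k8s.elastic.co/v1alpha1
kind: StackConfigPolicy
metadata:
  name: jwt-realm-policy
spec:
  # Applies to every Elasticsearch cluster carrying this label.
  resourceSelector:
    matchLabels:
      env: demo
  elasticsearch:
    config:
      xpack.security.authc.realms.jwt.jwt1:
        order: -98
        token_type: id_token
        client_authentication.type: shared_secret
        allowed_issuer: "https://idp.example.com"
        allowed_audiences: ["elasticsearch"]
        claims.principal: sub
----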
diff --git a/docs/orchestrating-elastic-stack-applications/security/managing-authentication-for-multiple-stacks/ldap-using-stack-config-policy.asciidoc b/docs/orchestrating-elastic-stack-applications/security/managing-authentication-for-multiple-stacks/ldap-using-stack-config-policy.asciidoc index b49d2d9e3b..2ed04a4ae6 100644 --- a/docs/orchestrating-elastic-stack-applications/security/managing-authentication-for-multiple-stacks/ldap-using-stack-config-policy.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/security/managing-authentication-for-multiple-stacks/ldap-using-stack-config-policy.asciidoc @@ -8,6 +8,8 @@ endif::[] [id="{p}-{page_id}"] = LDAP using Elastic stack configuration policy +CAUTION: {role_mappings_warning} + NOTE: This requires a valid Enterprise license or Enterprise trial license. Check <<{p}-licensing,the license documentation>> for more details about managing licenses. TIP: Make sure you check the complete link:https://www.elastic.co/guide/en/elasticsearch/reference/current/ldap-realm.html[guide to setting up LDAP with Elasticsearch]. diff --git a/docs/orchestrating-elastic-stack-applications/security/managing-authentication-for-multiple-stacks/oidc-stack-config-policy.asciidoc b/docs/orchestrating-elastic-stack-applications/security/managing-authentication-for-multiple-stacks/oidc-stack-config-policy.asciidoc index 76960e93b5..7021500236 100644 --- a/docs/orchestrating-elastic-stack-applications/security/managing-authentication-for-multiple-stacks/oidc-stack-config-policy.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/security/managing-authentication-for-multiple-stacks/oidc-stack-config-policy.asciidoc @@ -8,6 +8,8 @@ endif::[] [id="{p}-{page_id}"] = OIDC using Elastic stack configuration policy +CAUTION: {role_mappings_warning} + NOTE: This requires a valid Enterprise license or Enterprise trial license. Check <<{p}-licensing,the license documentation>> for more details about managing licenses. TIP: Make sure you check the complete link:https://www.elastic.co/guide/en/elasticsearch/reference/current/oidc-guide.html[guide to setting up OpenID Connect with Elasticsearch]. diff --git a/docs/orchestrating-elastic-stack-applications/security/saml-authentication.asciidoc b/docs/orchestrating-elastic-stack-applications/security/saml-authentication.asciidoc index 4712181c2f..60317db76a 100644 --- a/docs/orchestrating-elastic-stack-applications/security/saml-authentication.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/security/saml-authentication.asciidoc @@ -46,7 +46,7 @@ spec: idp.entity_id: https://sso.example.com/ idp.metadata.path: /usr/share/elasticsearch/config/saml/idp-saml-metadata.xml order: 2 - sp.acs: https://kibana.example.com/api/security/callback/saml + sp.acs: https://kibana.example.com/api/security/saml/callback sp.entity_id: https://kibana.example.com/ sp.logout: https://kibana.example.com/logout ---- diff --git a/docs/orchestrating-elastic-stack-applications/stack-config-policy.asciidoc b/docs/orchestrating-elastic-stack-applications/stack-config-policy.asciidoc index 1bc3233761..783b49b827 100644 --- a/docs/orchestrating-elastic-stack-applications/stack-config-policy.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/stack-config-policy.asciidoc @@ -7,6 +7,8 @@ endif::[] [id="{p}-{page_id}"] = Elastic Stack configuration policies +CAUTION: {role_mappings_warning} + NOTE: This requires a valid Enterprise license or Enterprise trial license. 
Check <<{p}-licensing,the license documentation>> for more details about managing licenses. Starting from ECK `2.6.1` and Elasticsearch `8.6.1`, Elastic Stack configuration policies allow you to configure the following settings for Elasticsearch: diff --git a/docs/orchestrating-elastic-stack-applications/upgrading-stack.asciidoc b/docs/orchestrating-elastic-stack-applications/upgrading-stack.asciidoc index 19fb290a06..21221a13bd 100644 --- a/docs/orchestrating-elastic-stack-applications/upgrading-stack.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/upgrading-stack.asciidoc @@ -7,6 +7,8 @@ endif::[] [id="{p}-{page_id}"] = Upgrade the Elastic Stack version +CAUTION: {role_mappings_warning} + The operator can safely perform upgrades to newer versions of the various Elastic Stack resources. Follow the instructions in the link:https://www.elastic.co/guide/en/elastic-stack/current/upgrading-elastic-stack.html[Elasticsearch documentation]. Make sure that your cluster is compatible with the target version, take backups, and follow the specific upgrade instructions for each resource type. When you are ready, modify the `version` field in the resource spec to the desired stack version and the operator will start the upgrade process automatically. diff --git a/docs/release-notes.asciidoc b/docs/release-notes.asciidoc index 7c8e0b2806..46b5d479d7 100644 --- a/docs/release-notes.asciidoc +++ b/docs/release-notes.asciidoc @@ -6,6 +6,7 @@ This section summarizes the changes in each release. +* <<release-notes-2.14.0>> * <> * <> * <> @@ -48,6 +49,7 @@ This section summarizes the changes in each release. -- +include::release-notes/2.14.0.asciidoc[] include::release-notes/2.13.0.asciidoc[] include::release-notes/2.12.1.asciidoc[] include::release-notes/2.12.0.asciidoc[] diff --git a/docs/release-notes/2.14.0.asciidoc b/docs/release-notes/2.14.0.asciidoc new file mode 100644 index 0000000000..9b41c3760c --- /dev/null +++ b/docs/release-notes/2.14.0.asciidoc @@ -0,0 +1,71 @@ +:issue: https://github.com/elastic/cloud-on-k8s/issues/ +:pull: https://github.com/elastic/cloud-on-k8s/pull/ + +[[release-notes-2.14.0]] +== {n} version 2.14.0 + +[[known-issue-short-2.14.0]] +[float] +=== Known issue +Users who have defined a <<{p}-readiness,custom readiness probe>> for Elasticsearch 8.2.0 or later must either remove the custom readiness probe before upgrading to 2.14 or, if that is not possible, adjust the readiness probe script as documented in <<known-issue-2.14.0>> after the upgrade.
+ + + +[[feature-2.14.0]] +[float] +=== New features + +* Ingress support for Elasticsearch and Kibana Helm Charts {pull}7941[#7941] +* Add option to disable self-signed transport certs {pull}7925[#7925] (issue: {issue}6954[#6954]) + +[[enhancement-2.14.0]] +[float] +=== Enhancements + +* Use Elasticsearch readiness port {pull}7847[#7847] (issue: {issue}7841[#7841]) _Note that this change is also referenced in the bug fixes section, as it fixes a bug in the previous implementation of the readiness probe._ +* Handle Serverless version in association versions check {pull}7896[#7896] +* Use hash for secure settings secret updates {pull}7843[#7843] (issue: {issue}7842[#7842]) +* Report memory usage by application {pull}7966[#7966] (issue: {issue}7866[#7866]) + +[[bug-2.14.0]] +[float] +=== Bug fixes + +* Fix Discrepancy between Logstash Helm Chart and docs for pipelinesRef {pull}7958[#7958] (issue: {issue}7957[#7957]) +* Fix Logstash service to preserve user defined labels {pull}7895[#7895] (issue: {issue}7855[#7855]) +* Handle empty NODE_ID in Elasticsearch PreStop hook {pull}7892[#7892] +* Elasticsearch controller: fix panic and dropped error result during node shutdown {pull}7875[#7875] +* Do not log registrations to prevent mapping explosion {pull}7869[#7869] (issue: {issue}7748[#7748]) +* Use Elasticsearch readiness port {pull}7847[#7847] (issue: {issue}7841[#7841]) + +[[docs-2.14.0]] +[float] +=== Documentation improvements + +* Document how to connect to unmanaged resources {pull}7965[#7965] (issue: {issue}6449[#6449]) +* Fix typo on SAML Authentication docs page {pull}7950[#7950] +* [OpenShift] Route apiVersion must be route.openshift.io/v1 {pull}7834[#7834] + +[[nogroup-2.14.0]] +[float] +=== Misc + +* update docker.io/library/golang docker tag to v1.22.5 {pull}7930[#7930] +* update github.com/gkampitakis/go-snaps to v0.5.5 {pull}7947[#7947] +* update github.com/go-logr/logr to v1.4.2 {pull}7850[#7850] +* update github.com/go-test/deep to v1.1.1 {pull}7916[#7916] +* update github.com/google/go-containerregistry to v0.20.1 {pull}7934[#7934] +* update github.com/hashicorp/go-retryablehttp from 0.7.6 to 0.7.7 {pull}7920[#7920] +* update github.com/hashicorp/vault/api to v1.14.0 {pull}7852[#7852] +* update github.com/prometheus/client_golang to v1.19.1 {pull}7796[#7796] +* update github.com/prometheus/common to v0.55.0 {pull}7923[#7923] +* update github.com/sethvargo/go-password to v0.3.1 {pull}7922[#7922] +* update github.com/spf13/cobra to v1.8.1 {pull}7903[#7903] +* update github.com/spf13/viper to v1.19.0 {pull}7864[#7864] +* update golang.org/x/crypto to v0.25.0 {pull}7932[#7932] +* update k8s to v0.30.3 {pull}7946[#7946] +* update module k8s.io/klog/v2 to v2.130.1 {pull}7917[#7917] +* update registry.access.redhat.com/ubi9/ubi-minimal docker tag to v9.4-1134 {pull}7900[#7900] +* update sigs.k8s.io/controller-runtime to v0.18.4 {pull}7882[#7882] +* update sigs.k8s.io/controller-tools to v0.15.0 and k8s to v0.30.0 {pull}7807[#7807] + diff --git a/docs/release-notes/highlights-2.14.0.asciidoc b/docs/release-notes/highlights-2.14.0.asciidoc new file mode 100644 index 0000000000..7b28e6e222 --- /dev/null +++ b/docs/release-notes/highlights-2.14.0.asciidoc @@ -0,0 +1,45 @@ +[[release-highlights-2.14.0]] +== 2.14.0 release highlights + +[[known-issue-2.14.0]] +[float] +=== Known issue +Users who have defined a <<{p}-readiness,custom readiness probe>> for Elasticsearch 8.2.0 or later must either remove the custom readiness probe before upgrading to 2.14 or, if that is not possible,
adjust the readiness probe script as follows after the upgrade: +[source,yaml] +---- +podTemplate: + spec: + containers: + - name: elasticsearch + readinessProbe: + exec: + command: + - bash + - -c + - /mnt/elastic-internal/scripts/readiness-port-script.sh +---- + +[float] +[id="{p}-2140-new-and-notable"] +=== New and notable + +New and notable changes in version 2.14.0 of {n}. Check <<release-notes-2.14.0>> for the full list of changes. + +[float] +[id="{p}-2140-custom-transport-certificate-management"] +=== Custom transport certificate management + +Starting with ECK 2.14.0, you can fully delegate transport certificate management for Elasticsearch to a third-party component. When used in conjunction with the cert-manager operator and its CSI driver, this also improves the scalability of node sets. Refer to <<{p}-transport-third-party-tools>> for a complete example. + +[float] +[id="{p}-2140-advanced-readiness-probe"] +=== Advanced readiness probe + +The Elasticsearch containers are now configured to use the link:https://www.elastic.co/guide/en/elasticsearch/reference/8.19/modules-network.html#readiness-tcp-port[Elasticsearch TCP readiness port]. This change also improves cluster upgrade stability by fixing a bug in the upgrade process. + +[float] +[id="{p}-2140-connect-resources-to-serverless"] +=== Connect ECK managed applications to a Serverless Project + +The ECK operator can now detect when a remote service is deployed in link:https://www.elastic.co/elasticsearch/serverless[Elastic Cloud Serverless]. This allows the use of the <<{p}-connect-to-unmanaged-resources>> feature to connect an ECK managed resource to your serverless project. + diff --git a/docs/release-notes/highlights.asciidoc b/docs/release-notes/highlights.asciidoc index d183d5ef29..2f6fe76dee 100644 --- a/docs/release-notes/highlights.asciidoc +++ b/docs/release-notes/highlights.asciidoc @@ -5,6 +5,7 @@ -- This section summarizes the most important changes in each release. For the full list, check <>. +* <<release-highlights-2.14.0>> * <> * <> * <> @@ -47,6 +48,7 @@ This section summarizes the most important changes in each release.
For the full -- +include::highlights-2.14.0.asciidoc[] include::highlights-2.13.0.asciidoc[] include::highlights-2.12.1.asciidoc[] include::highlights-2.12.0.asciidoc[] diff --git a/hack/operatorhub/config.yaml b/hack/operatorhub/config.yaml index 467efb439f..e4e6c84787 100644 --- a/hack/operatorhub/config.yaml +++ b/hack/operatorhub/config.yaml @@ -1,6 +1,6 @@ -newVersion: 2.14.0-SNAPSHOT +newVersion: 2.14.0 prevVersion: 2.13.0 -stackVersion: 8.15.0-SNAPSHOT +stackVersion: 8.15.0 crds: - name: elasticsearches.elasticsearch.k8s.elastic.co displayName: Elasticsearch Cluster diff --git a/hack/upgrade-test-harness/conf.yaml b/hack/upgrade-test-harness/conf.yaml index 03feb7c081..ccca80ca6e 100644 --- a/hack/upgrade-test-harness/conf.yaml +++ b/hack/upgrade-test-harness/conf.yaml @@ -45,5 +45,5 @@ testParams: operatorVersion: 2.13.0 stackVersion: 8.14.0 - name: upcoming - operatorVersion: 2.14.0-SNAPSHOT + operatorVersion: 2.14.0 stackVersion: 8.15.0-SNAPSHOT diff --git a/pkg/license/aggregator.go b/pkg/license/aggregator.go index 412c9adff4..1cda063edd 100644 --- a/pkg/license/aggregator.go +++ b/pkg/license/aggregator.go @@ -30,16 +30,16 @@ import ( ulog "github.com/elastic/cloud-on-k8s/v2/pkg/utils/log" ) -// Aggregator aggregates the total of resources of all Elastic managed components -type Aggregator struct { +// aggregator aggregates the total of resources of all Elastic managed components +type aggregator struct { client k8s.Client } -type aggregate func(ctx context.Context) (resource.Quantity, error) +type aggregate func(ctx context.Context) (managedMemory, error) -// AggregateMemory aggregates the total memory of all Elastic managed components -func (a Aggregator) AggregateMemory(ctx context.Context) (resource.Quantity, error) { - var totalMemory resource.Quantity +// aggregateMemory aggregates the total memory of all Elastic managed components +func (a aggregator) aggregateMemory(ctx context.Context) (memoryUsage, error) { + usage := newMemoryUsage() for _, f := range []aggregate{ a.aggregateElasticsearchMemory, @@ -50,19 +50,19 @@ func (a Aggregator) AggregateMemory(ctx context.Context) (resource.Quantity, err } { memory, err := f(ctx) if err != nil { - return resource.Quantity{}, err + return memoryUsage{}, err } - totalMemory.Add(memory) + usage.add(memory) } - return totalMemory, nil + return usage, nil } -func (a Aggregator) aggregateElasticsearchMemory(ctx context.Context) (resource.Quantity, error) { +func (a aggregator) aggregateElasticsearchMemory(ctx context.Context) (managedMemory, error) { var esList esv1.ElasticsearchList err := a.client.List(context.Background(), &esList) if err != nil { - return resource.Quantity{}, errors.Wrap(err, "failed to aggregate Elasticsearch memory") + return managedMemory{}, errors.Wrap(err, "failed to aggregate Elasticsearch memory") } var total resource.Quantity @@ -75,7 +75,7 @@ func (a Aggregator) aggregateElasticsearchMemory(ctx context.Context) (resource. nodespec.DefaultMemoryLimits, ) if err != nil { - return resource.Quantity{}, errors.Wrap(err, "failed to aggregate Elasticsearch memory") + return managedMemory{}, errors.Wrap(err, "failed to aggregate Elasticsearch memory") } total.Add(multiply(mem, nodeSet.Count)) @@ -84,14 +84,14 @@ func (a Aggregator) aggregateElasticsearchMemory(ctx context.Context) (resource. 
} } - return total, nil + return managedMemory{total, elasticsearchKey}, nil } -func (a Aggregator) aggregateEnterpriseSearchMemory(ctx context.Context) (resource.Quantity, error) { +func (a aggregator) aggregateEnterpriseSearchMemory(ctx context.Context) (managedMemory, error) { var entList entv1.EnterpriseSearchList err := a.client.List(context.Background(), &entList) if err != nil { - return resource.Quantity{}, errors.Wrap(err, "failed to aggregate Enterprise Search memory") + return managedMemory{}, errors.Wrap(err, "failed to aggregate Enterprise Search memory") } var total resource.Quantity @@ -103,7 +103,7 @@ func (a Aggregator) aggregateEnterpriseSearchMemory(ctx context.Context) (resour enterprisesearch.DefaultMemoryLimits, ) if err != nil { - return resource.Quantity{}, errors.Wrap(err, "failed to aggregate Enterprise Search memory") + return managedMemory{}, errors.Wrap(err, "failed to aggregate Enterprise Search memory") } total.Add(multiply(mem, ent.Spec.Count)) @@ -111,14 +111,14 @@ func (a Aggregator) aggregateEnterpriseSearchMemory(ctx context.Context) (resour "memory", mem.String(), "count", ent.Spec.Count) } - return total, nil + return managedMemory{total, entSearchKey}, nil } -func (a Aggregator) aggregateKibanaMemory(ctx context.Context) (resource.Quantity, error) { +func (a aggregator) aggregateKibanaMemory(ctx context.Context) (managedMemory, error) { var kbList kbv1.KibanaList err := a.client.List(context.Background(), &kbList) if err != nil { - return resource.Quantity{}, errors.Wrap(err, "failed to aggregate Kibana memory") + return managedMemory{}, errors.Wrap(err, "failed to aggregate Kibana memory") } var total resource.Quantity @@ -130,7 +130,7 @@ func (a Aggregator) aggregateKibanaMemory(ctx context.Context) (resource.Quantit kibana.DefaultMemoryLimits, ) if err != nil { - return resource.Quantity{}, errors.Wrap(err, "failed to aggregate Kibana memory") + return managedMemory{}, errors.Wrap(err, "failed to aggregate Kibana memory") } total.Add(multiply(mem, kb.Spec.Count)) @@ -138,14 +138,14 @@ func (a Aggregator) aggregateKibanaMemory(ctx context.Context) (resource.Quantit "memory", mem.String(), "count", kb.Spec.Count) } - return total, nil + return managedMemory{total, kibanaKey}, nil } -func (a Aggregator) aggregateLogstashMemory(ctx context.Context) (resource.Quantity, error) { +func (a aggregator) aggregateLogstashMemory(ctx context.Context) (managedMemory, error) { var lsList lsv1alpha1.LogstashList err := a.client.List(context.Background(), &lsList) if err != nil { - return resource.Quantity{}, errors.Wrap(err, "failed to aggregate Logstash memory") + return managedMemory{}, errors.Wrap(err, "failed to aggregate Logstash memory") } var total resource.Quantity @@ -157,7 +157,7 @@ func (a Aggregator) aggregateLogstashMemory(ctx context.Context) (resource.Quant logstash.DefaultMemoryLimit, ) if err != nil { - return resource.Quantity{}, errors.Wrap(err, "failed to aggregate Logstash memory") + return managedMemory{}, errors.Wrap(err, "failed to aggregate Logstash memory") } total.Add(multiply(mem, ls.Spec.Count)) @@ -165,14 +165,14 @@ func (a Aggregator) aggregateLogstashMemory(ctx context.Context) (resource.Quant "memory", mem.String(), "count", ls.Spec.Count) } - return total, nil + return managedMemory{total, logstashKey}, nil } -func (a Aggregator) aggregateApmServerMemory(ctx context.Context) (resource.Quantity, error) { +func (a aggregator) aggregateApmServerMemory(ctx context.Context) (managedMemory, error) { var asList apmv1.ApmServerList err := 
a.client.List(context.Background(), &asList) if err != nil { - return resource.Quantity{}, errors.Wrap(err, "failed to aggregate APM Server memory") + return managedMemory{}, errors.Wrap(err, "failed to aggregate APM Server memory") } var total resource.Quantity @@ -184,7 +184,7 @@ func (a Aggregator) aggregateApmServerMemory(ctx context.Context) (resource.Quan apmserver.DefaultMemoryLimits, ) if err != nil { - return resource.Quantity{}, errors.Wrap(err, "failed to aggregate APM Server memory") + return managedMemory{}, errors.Wrap(err, "failed to aggregate APM Server memory") } total.Add(multiply(mem, as.Spec.Count)) @@ -192,7 +192,7 @@ func (a Aggregator) aggregateApmServerMemory(ctx context.Context) (resource.Quan "memory", mem.String(), "count", as.Spec.Count) } - return total, nil + return managedMemory{total, apmKey}, nil } // containerMemLimits reads the container memory limits from the resource specification with fallback diff --git a/pkg/license/aggregator_test.go b/pkg/license/aggregator_test.go index c2356af13c..e4b7927d0a 100644 --- a/pkg/license/aggregator_test.go +++ b/pkg/license/aggregator_test.go @@ -138,11 +138,20 @@ func TestMemFromNodeOpts(t *testing.T) { func TestAggregator(t *testing.T) { objects := readObjects(t, "testdata/stack.yaml") client := k8s.NewFakeClient(objects...) - aggregator := Aggregator{client: client} + aggregator := aggregator{client: client} - val, err := aggregator.AggregateMemory(context.Background()) + val, err := aggregator.aggregateMemory(context.Background()) require.NoError(t, err) - require.Equal(t, 329.9073486328125, inGiB(val)) + for k, v := range map[string]float64{ + elasticsearchKey: 294.0, + kibanaKey: 5.9073486328125, + apmKey: 2.0, + entSearchKey: 24.0, + logstashKey: 4.0, + } { + require.Equal(t, v, val.appUsage[k].inGiB(), k) + } + require.Equal(t, 329.9073486328125, val.totalMemory.inGiB(), "total") } func readObjects(t *testing.T, filePath string) []client.Object { diff --git a/pkg/license/license.go b/pkg/license/license.go index 9596f2df86..4cd30e6e2f 100644 --- a/pkg/license/license.go +++ b/pkg/license/license.go @@ -37,16 +37,60 @@ const ( Type = "elastic-usage" // GiB represents the number of bytes for 1 GiB GiB = 1024 * 1024 * 1024 + + elasticsearchKey = "elasticsearch" + kibanaKey = "kibana" + apmKey = "apm" + entSearchKey = "enterprise_search" + logstashKey = "logstash" + totalKey = "total_managed" ) +type managedMemory struct { + resource.Quantity + label string +} + +func newManagedMemory(binarySI int64, label string) managedMemory { + return managedMemory{ + Quantity: *resource.NewQuantity(binarySI, resource.BinarySI), + label: label, + } +} + +func (mm managedMemory) inGiB() float64 { + return inGiB(mm.Quantity) +} + +func (mm managedMemory) intoMap(m map[string]string) { + m[mm.label+"_memory"] = fmt.Sprintf("%0.2fGiB", inGiB(mm.Quantity)) + m[mm.label+"_memory_bytes"] = fmt.Sprintf("%d", mm.Quantity.Value()) +} + +type memoryUsage struct { + appUsage map[string]managedMemory + totalMemory managedMemory +} + +func newMemoryUsage() memoryUsage { + return memoryUsage{ + appUsage: map[string]managedMemory{}, + totalMemory: managedMemory{label: totalKey}, + } +} + +func (mu *memoryUsage) add(memory managedMemory) { + mu.appUsage[memory.label] = memory + mu.totalMemory.Add(memory.Quantity) +} + // LicensingInfo represents information about the operator license including the total memory of all Elastic managed // components type LicensingInfo struct { + memoryUsage Timestamp string EckLicenseLevel string 
EckLicenseExpiryDate *time.Time - TotalManagedMemoryGiB float64 - TotalManagedMemoryBytes int64 MaxEnterpriseResourceUnits int64 EnterpriseResourceUnits int64 } @@ -54,12 +98,14 @@ type LicensingInfo struct { // toMap transforms a LicensingInfo to a map of string, in order to fill in the data of a config map func (li LicensingInfo) toMap() map[string]string { m := map[string]string{ - "timestamp": li.Timestamp, - "eck_license_level": li.EckLicenseLevel, - "total_managed_memory": fmt.Sprintf("%0.2fGiB", li.TotalManagedMemoryGiB), - "total_managed_memory_bytes": fmt.Sprintf("%d", li.TotalManagedMemoryBytes), - "enterprise_resource_units": strconv.FormatInt(li.EnterpriseResourceUnits, 10), + "timestamp": li.Timestamp, + "eck_license_level": li.EckLicenseLevel, + "enterprise_resource_units": strconv.FormatInt(li.EnterpriseResourceUnits, 10), + } + for _, v := range li.appUsage { + v.intoMap(m) } + li.totalMemory.intoMap(m) if li.MaxEnterpriseResourceUnits > 0 { m["max_enterprise_resource_units"] = strconv.FormatInt(li.MaxEnterpriseResourceUnits, 10) @@ -74,7 +120,12 @@ func (li LicensingInfo) toMap() map[string]string { func (li LicensingInfo) ReportAsMetrics() { labels := prometheus.Labels{metrics.LicenseLevelLabel: li.EckLicenseLevel} - metrics.LicensingTotalMemoryGauge.With(labels).Set(li.TotalManagedMemoryGiB) + metrics.LicensingTotalMemoryGauge.With(labels).Set(li.totalMemory.inGiB()) + metrics.LicensingESMemoryGauge.With(labels).Set(li.appUsage[elasticsearchKey].inGiB()) + metrics.LicensingKBMemoryGauge.With(labels).Set(li.appUsage[kibanaKey].inGiB()) + metrics.LicensingAPMMemoryGauge.With(labels).Set(li.appUsage[apmKey].inGiB()) + metrics.LicensingEntSearchMemoryGauge.With(labels).Set(li.appUsage[entSearchKey].inGiB()) + metrics.LicensingLogstashMemoryGauge.With(labels).Set(li.appUsage[logstashKey].inGiB()) metrics.LicensingTotalERUGauge.With(labels).Set(float64(li.EnterpriseResourceUnits)) if li.MaxEnterpriseResourceUnits > 0 { @@ -89,19 +140,18 @@ type LicensingResolver struct { } // ToInfo returns licensing information given the total memory of all Elastic managed components -func (r LicensingResolver) ToInfo(ctx context.Context, totalMemory resource.Quantity) (LicensingInfo, error) { +func (r LicensingResolver) ToInfo(ctx context.Context, memoryUsage memoryUsage) (LicensingInfo, error) { operatorLicense, err := r.getOperatorLicense(ctx) if err != nil { return LicensingInfo{}, err } licensingInfo := LicensingInfo{ + memoryUsage: memoryUsage, Timestamp: time.Now().Format(time.RFC3339), EckLicenseLevel: r.getOperatorLicenseLevel(operatorLicense), EckLicenseExpiryDate: r.getOperatorLicenseExpiry(operatorLicense), - TotalManagedMemoryGiB: inGiB(totalMemory), - TotalManagedMemoryBytes: totalMemory.Value(), - EnterpriseResourceUnits: inEnterpriseResourceUnits(totalMemory), + EnterpriseResourceUnits: inEnterpriseResourceUnits(memoryUsage.totalMemory.Quantity), } // include the max ERUs only for a non trial/basic license diff --git a/pkg/license/license_test.go b/pkg/license/license_test.go index 6a6860fab3..ac94c8af9e 100644 --- a/pkg/license/license_test.go +++ b/pkg/license/license_test.go @@ -17,7 +17,7 @@ func TestToMap(t *testing.T) { dateFixture := time.Date(2021, 11, 03, 0, 0, 0, 0, time.UTC) t.Run("empty_object", func(t *testing.T) { - i := LicensingInfo{} + i := LicensingInfo{memoryUsage: newMemoryUsage()} have := i.toMap() want := map[string]string{ "timestamp": "", @@ -31,24 +31,42 @@ func TestToMap(t *testing.T) { t.Run("complete_object", func(t *testing.T) { i := LicensingInfo{ 
+ memoryUsage: memoryUsage{ + appUsage: map[string]managedMemory{ + elasticsearchKey: newManagedMemory(21474836480, elasticsearchKey), + kibanaKey: newManagedMemory(8589934592, kibanaKey), + apmKey: newManagedMemory(4294967296, apmKey), + entSearchKey: newManagedMemory(17179869184, entSearchKey), + logstashKey: newManagedMemory(17179869184, logstashKey), + }, + totalMemory: newManagedMemory(68719476736, totalKey), + }, Timestamp: "2020-05-28T11:15:31Z", EckLicenseLevel: "enterprise", EckLicenseExpiryDate: &dateFixture, - TotalManagedMemoryGiB: 64, - TotalManagedMemoryBytes: 68719476736, EnterpriseResourceUnits: 1, MaxEnterpriseResourceUnits: 10, } have := i.toMap() want := map[string]string{ - "timestamp": "2020-05-28T11:15:31Z", - "eck_license_level": "enterprise", - "eck_license_expiry_date": "2021-11-03T00:00:00Z", - "total_managed_memory": "64.00GiB", - "total_managed_memory_bytes": "68719476736", - "enterprise_resource_units": "1", - "max_enterprise_resource_units": "10", + "timestamp": "2020-05-28T11:15:31Z", + "eck_license_level": "enterprise", + "eck_license_expiry_date": "2021-11-03T00:00:00Z", + "elasticsearch_memory": "20.00GiB", + "elasticsearch_memory_bytes": "21474836480", + "kibana_memory": "8.00GiB", + "kibana_memory_bytes": "8589934592", + "apm_memory": "4.00GiB", + "apm_memory_bytes": "4294967296", + "enterprise_search_memory": "16.00GiB", + "enterprise_search_memory_bytes": "17179869184", + "logstash_memory": "16.00GiB", + "logstash_memory_bytes": "17179869184", + "total_managed_memory": "64.00GiB", + "total_managed_memory_bytes": "68719476736", + "enterprise_resource_units": "1", + "max_enterprise_resource_units": "10", } assert.Equal(t, want, have) }) diff --git a/pkg/license/reporter.go b/pkg/license/reporter.go index f3dffa8af6..1af11c3c1f 100644 --- a/pkg/license/reporter.go +++ b/pkg/license/reporter.go @@ -21,7 +21,7 @@ const ResourceReporterFrequency = 2 * time.Minute // ResourceReporter aggregates resources of all Elastic components managed by the operator // and reports them in a config map in the form of licensing information type ResourceReporter struct { - aggregator Aggregator + aggregator aggregator licensingResolver LicensingResolver tracer *apm.Tracer } @@ -29,7 +29,7 @@ type ResourceReporter struct { // NewResourceReporter returns a new ResourceReporter func NewResourceReporter(c client.Client, operatorNs string, tracer *apm.Tracer) ResourceReporter { return ResourceReporter{ - aggregator: Aggregator{ + aggregator: aggregator{ client: c, }, licensingResolver: LicensingResolver{ @@ -77,10 +77,10 @@ func (r ResourceReporter) Report(ctx context.Context) error { func (r ResourceReporter) Get(ctx context.Context) (LicensingInfo, error) { span, _ := apm.StartSpan(ctx, "get_license_info", tracing.SpanTypeApp) defer span.End() - totalMemory, err := r.aggregator.AggregateMemory(ctx) + usage, err := r.aggregator.aggregateMemory(ctx) if err != nil { return LicensingInfo{}, err } - return r.licensingResolver.ToInfo(ctx, totalMemory) + return r.licensingResolver.ToInfo(ctx, usage) } diff --git a/pkg/license/reporter_test.go b/pkg/license/reporter_test.go index 1918369e7e..6d78545001 100644 --- a/pkg/license/reporter_test.go +++ b/pkg/license/reporter_test.go @@ -41,8 +41,16 @@ func TestGet(t *testing.T) { require.NoError(t, err) want := LicensingInfo{ - TotalManagedMemoryGiB: 20.00, - TotalManagedMemoryBytes: 21474836480, + memoryUsage: memoryUsage{ + appUsage: map[string]managedMemory{ + elasticsearchKey: newManagedMemory(21474836480, elasticsearchKey), + 
kibanaKey: newManagedMemory(0, kibanaKey), + apmKey: newManagedMemory(0, apmKey), + entSearchKey: newManagedMemory(0, entSearchKey), + logstashKey: newManagedMemory(0, logstashKey), + }, + totalMemory: managedMemory{Quantity: resource.MustParse("20Gi"), label: totalKey}, + }, EnterpriseResourceUnits: 1, EckLicenseLevel: "basic", } @@ -76,8 +84,16 @@ func TestGet(t *testing.T) { require.NoError(t, err) want := LicensingInfo{ - TotalManagedMemoryGiB: 320.00, - TotalManagedMemoryBytes: 343597383680, + memoryUsage: memoryUsage{ + appUsage: map[string]managedMemory{ + elasticsearchKey: newManagedMemory(343597383680, elasticsearchKey), + kibanaKey: newManagedMemory(0, kibanaKey), + apmKey: newManagedMemory(0, apmKey), + entSearchKey: newManagedMemory(0, entSearchKey), + logstashKey: newManagedMemory(0, logstashKey), + }, + totalMemory: newManagedMemory(343597383680, totalKey), + }, EnterpriseResourceUnits: 5, EckLicenseLevel: "basic", } @@ -110,8 +126,16 @@ func TestGet(t *testing.T) { require.NoError(t, err) want := LicensingInfo{ - TotalManagedMemoryGiB: 208.00, - TotalManagedMemoryBytes: 223338299392, + memoryUsage: memoryUsage{ + appUsage: map[string]managedMemory{ + elasticsearchKey: newManagedMemory(223338299392, elasticsearchKey), + kibanaKey: newManagedMemory(0, kibanaKey), + apmKey: newManagedMemory(0, apmKey), + entSearchKey: newManagedMemory(0, entSearchKey), + logstashKey: newManagedMemory(0, logstashKey), + }, + totalMemory: newManagedMemory(223338299392, totalKey), + }, EnterpriseResourceUnits: 4, EckLicenseLevel: "basic", } @@ -130,8 +154,16 @@ func TestGet(t *testing.T) { require.NoError(t, err) want := LicensingInfo{ - TotalManagedMemoryGiB: 100.00, - TotalManagedMemoryBytes: 107374182400, + memoryUsage: memoryUsage{ + appUsage: map[string]managedMemory{ + elasticsearchKey: newManagedMemory(0, elasticsearchKey), + kibanaKey: newManagedMemory(107374182400, kibanaKey), + apmKey: newManagedMemory(0, apmKey), + entSearchKey: newManagedMemory(0, entSearchKey), + logstashKey: newManagedMemory(0, logstashKey), + }, + totalMemory: newManagedMemory(107374182400, totalKey), + }, EnterpriseResourceUnits: 2, EckLicenseLevel: "basic", } @@ -163,8 +195,16 @@ func TestGet(t *testing.T) { have, err := NewResourceReporter(k8s.NewFakeClient(&kb), operatorNs, nil).Get(context.Background()) require.NoError(t, err) want := LicensingInfo{ - TotalManagedMemoryGiB: 200.00, - TotalManagedMemoryBytes: 214748364800, + memoryUsage: memoryUsage{ + appUsage: map[string]managedMemory{ + elasticsearchKey: newManagedMemory(0, elasticsearchKey), + kibanaKey: newManagedMemory(214748364800, kibanaKey), + apmKey: newManagedMemory(0, apmKey), + entSearchKey: newManagedMemory(0, entSearchKey), + logstashKey: newManagedMemory(0, logstashKey), + }, + totalMemory: newManagedMemory(214748364800, totalKey), + }, EnterpriseResourceUnits: 4, EckLicenseLevel: "basic", } @@ -193,8 +233,16 @@ func TestGet(t *testing.T) { have, err := NewResourceReporter(k8s.NewFakeClient(&kb), operatorNs, nil).Get(context.Background()) require.NoError(t, err) want := LicensingInfo{ - TotalManagedMemoryGiB: 190.73, - TotalManagedMemoryBytes: 204800000000, + memoryUsage: memoryUsage{ + appUsage: map[string]managedMemory{ + elasticsearchKey: newManagedMemory(0, elasticsearchKey), + kibanaKey: newManagedMemory(204800000000, kibanaKey), + apmKey: newManagedMemory(0, apmKey), + entSearchKey: newManagedMemory(0, entSearchKey), + logstashKey: newManagedMemory(0, logstashKey), + }, + totalMemory: newManagedMemory(204800000000, totalKey), + }, 
EnterpriseResourceUnits: 3, EckLicenseLevel: "basic", } @@ -245,8 +293,14 @@ func Test_Start(t *testing.T) { cm.Data["eck_license_level"] == defaultOperatorLicenseLevel && cm.Data["enterprise_resource_units"] == "2" && cm.Data["total_managed_memory"] == "83.00GiB" && - cm.Data["total_managed_memory_bytes"] == "89120571392" - }, waitFor, tick) + cm.Data["total_managed_memory_bytes"] == "89120571392" && + cm.Data["elasticsearch_memory"] == "80.00GiB" && // 40 * 2Gi + cm.Data["elasticsearch_memory_bytes"] == "85899345920" && + cm.Data["kibana_memory"] == "2.00GiB" && // 2 * 1Gi + cm.Data["kibana_memory_bytes"] == "2147483648" && + cm.Data["apm_memory"] == "1.00GiB" && // 2 * 512Mi + cm.Data["apm_memory_bytes"] == "1073741824" + }, waitFor, tick, "40*ES, 2*KB, 2 *APM") // increase the Elasticsearch nodes count es.Spec.NodeSets[0].Count = 80 @@ -268,7 +322,7 @@ func Test_Start(t *testing.T) { cm.Data["enterprise_resource_units"] == "3" && cm.Data["total_managed_memory"] == "163.00GiB" && cm.Data["total_managed_memory_bytes"] == "175019917312" - }, waitFor, tick) + }, waitFor, tick, "80*ES, 2*KB, 2*APM") startTrial(t, k8sClient) // check that the license level has been updated @@ -287,7 +341,7 @@ func Test_Start(t *testing.T) { cm.Data["enterprise_resource_units"] == "3" && cm.Data["total_managed_memory"] == "163.00GiB" && cm.Data["total_managed_memory_bytes"] == "175019917312" - }, waitFor, tick) + }, waitFor, tick, "trial license") } func startTrial(t *testing.T, k8sClient client.Client) { diff --git a/pkg/utils/metrics/operator.go b/pkg/utils/metrics/operator.go index 1b73b0f4cf..4b940dff10 100644 --- a/pkg/utils/metrics/operator.go +++ b/pkg/utils/metrics/operator.go @@ -52,6 +52,46 @@ var ( Name: "memory_gibibytes_total", Help: "Total memory used in GiB", }, []string{LicenseLevelLabel})) + + // LicensingESMemoryGauge reports the Elasticsearch memory usage for licensing purposes. + LicensingESMemoryGauge = registerGauge(prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: licensingSubsystem, + Name: "memory_gibibytes_elasticsearch", + Help: "Memory used by Elasticsearch in GiB", + }, []string{LicenseLevelLabel})) + + // LicensingKBMemoryGauge reports the Kibana memory usage for licensing purposes. + LicensingKBMemoryGauge = registerGauge(prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: licensingSubsystem, + Name: "memory_gibibytes_kibana", + Help: "Memory used by Kibana in GiB", + }, []string{LicenseLevelLabel})) + + // LicensingAPMMemoryGauge reports the APM server memory usage for licensing purposes. + LicensingAPMMemoryGauge = registerGauge(prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: licensingSubsystem, + Name: "memory_gibibytes_apm", + Help: "Memory used by APM server in GiB", + }, []string{LicenseLevelLabel})) + + // LicensingEntSearchMemoryGauge reports the Enterprise Search memory usage for licensing purposes. + LicensingEntSearchMemoryGauge = registerGauge(prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: licensingSubsystem, + Name: "memory_gibibytes_enterprise_search", + Help: "Memory used by Enterprise Search in GiB", + }, []string{LicenseLevelLabel})) + + // LicensingLogstashMemoryGauge reports the Logstash memory usage for licensing purposes. 
+ LicensingLogstashMemoryGauge = registerGauge(prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: licensingSubsystem, + Name: "memory_gibibytes_logstash", + Help: "Memory used by Logstash in GiB", + }, []string{LicenseLevelLabel})) ) func registerGauge(gauge *prometheus.GaugeVec) *prometheus.GaugeVec { diff --git a/test/e2e/kb/version_upgrade_test.go b/test/e2e/kb/version_upgrade_test.go index d53357c47c..fba6f542b2 100644 --- a/test/e2e/kb/version_upgrade_test.go +++ b/test/e2e/kb/version_upgrade_test.go @@ -30,7 +30,7 @@ func TestVersionUpgradeToLatest7x(t *testing.T) { name := "test-version-upgrade-to-7x" esBuilder := elasticsearch.NewBuilder(name). - WithESMasterDataNodes(1, elasticsearch.DefaultResources). + WithESMasterDataNodes(minClusterSizeFromKibanaVersion(t, dstVersion), elasticsearch.DefaultResources). WithVersion(dstVersion) srcNodeCount := 3 @@ -54,7 +54,7 @@ func TestVersionUpgradeToLatest7x(t *testing.T) { } // perform a Kibana version upgrade and assert that: - // - there was a time were no Kibana pods were ready (when all old version pods were termintated, + // - there was a time when no Kibana pods were ready (when all old version pods were terminated, // but before new version pods were started), and // - at all times all pods had the same Kibana version. test.RunMutationsWhileWatching( @@ -65,6 +65,23 @@ func TestVersionUpgradeToLatest7x(t *testing.T) { ) } +var ( + noAutomaticIndexCreationKibanaVersion = version.MustParse("7.17.23") +) + +// minClusterSizeFromKibanaVersion is a workaround for https://github.com/elastic/kibana/pull/158182 +func minClusterSizeFromKibanaVersion(t *testing.T, to string) int { + t.Helper() + dstVer, err := version.Parse(to) + if err != nil { + t.Fatalf("Failed to parse version '%s': %s", to, err) + } + if dstVer.LT(noAutomaticIndexCreationKibanaVersion) { + return 2 + } + return 1 +} + func TestVersionUpgradeAndRespecToLatest7x(t *testing.T) { srcVersion := test.Ctx().ElasticStackVersion dstVersion := test.LatestReleasedVersion7x @@ -73,7 +90,7 @@ func TestVersionUpgradeAndRespecToLatest7x(t *testing.T) { name := "test-upgrade-and-respec-to-7x" esBuilder := elasticsearch.NewBuilder(name). - WithESMasterDataNodes(1, elasticsearch.DefaultResources). + WithESMasterDataNodes(minClusterSizeFromKibanaVersion(t, dstVersion), elasticsearch.DefaultResources). WithVersion(dstVersion) srcNodeCount := 3 diff --git a/test/e2e/test/version.go b/test/e2e/test/version.go index faccde4b29..9c7f6151ad 100644 --- a/test/e2e/test/version.go +++ b/test/e2e/test/version.go @@ -18,8 +18,7 @@ const ( // LatestReleasedVersion7x is the latest released version for 7.x LatestReleasedVersion7x = "7.17.21" // LatestReleasedVersion8x is the latest release version for 8.x - // TODO: update to latest 8.14.x when released with a fix to https://github.com/elastic/cloud-on-k8s/issues/7878 - LatestReleasedVersion8x = "8.13.2" + LatestReleasedVersion8x = "8.14.3" ) // SkipInvalidUpgrade skips a test that would do an invalid upgrade.
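A closing note on the new licensing gauges: assuming the operator's `namespace` and `licensingSubsystem` constants resolve to `elastic` and `licensing`, as they do for the existing `elastic_licensing_memory_gibibytes_total` metric, the per-application gauges should surface as `elastic_licensing_memory_gibibytes_<app>`. A hypothetical PrometheusRule sketch built on that assumption:

[source,yaml]
----
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: eck-licensing-usage
spec:
  groups:
    - name: eck-licensing
      rules:
        # Alert when Elasticsearch alone consumes most of the licensed memory,
        # leaving little headroom for Kibana, APM, Enterprise Search, and Logstash.
        - alert: ElasticsearchDominatesLicensedMemory
          expr: |
            elastic_licensing_memory_gibibytes_elasticsearch
              / elastic_licensing_memory_gibibytes_total > 0.9
          for: 1h
          labels:
            severity: info
----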