diff --git a/charts/restate-helm/README.md b/charts/restate-helm/README.md index 1d58d1c316..090f15eced 100644 --- a/charts/restate-helm/README.md +++ b/charts/restate-helm/README.md @@ -8,8 +8,26 @@ Helm chart for Restate as a single-node StatefulSet. helm install restate oci://ghcr.io/restatedev/restate-helm --namespace restate --create-namespace ``` +# Resources +Restate's performance is strongly influenced by the CPU and memory available. You can vary the resources in your values file. +The defaults are: + +```yaml +resources: + limits: + cpu: 6 + memory: 8Gi + requests: + cpu: 4 + memory: 8Gi +``` + +The environment variable `RESTATE_ROCKSDB_TOTAL_MEMORY_SIZE` should be set such that it is roughly 75% of the memory available. +In the default Helm values, this variable is set to `6Gi`. +Under load, Restate will eventually use the entire RocksDB memory size offered to it. + # Running a replicated cluster -You can find example values for a 3-node replicated cluster in [replicated-values.yaml](./replicated-values.yaml). +You can find example values for a 3-node replicated cluster in [replicated-values.yaml](./replicated-values.yaml). Please ensure you use a version of that file (based on the git tag of the repo) which matches the version of the Helm chart you are deploying. ```bash diff --git a/charts/restate-helm/values.yaml b/charts/restate-helm/values.yaml index b993339cbc..81f1ed7c26 100644 --- a/charts/restate-helm/values.yaml +++ b/charts/restate-helm/values.yaml @@ -39,6 +39,10 @@ env: value: json - name: RESTATE_CLUSTER_NAME value: helm-single-node + - name: RESTATE_ROCKSDB_TOTAL_MEMORY_SIZE + # This value should be around 75% of the container memory limit, which defaults to 8Gi below.
+ # If provisioning Restate with a different memory limit, make sure to update this value. + value: 6Gi service: type: ClusterIP @@ -46,11 +50,12 @@ service: resources: limits: - cpu: 1 - memory: 3Gi + cpu: 6 + # note the comment above regarding 'RESTATE_ROCKSDB_TOTAL_MEMORY_SIZE' + memory: 8Gi requests: - cpu: 500m - memory: 1Gi + cpu: 4 + memory: 8Gi storage: # If provided the volume will be mounted with the specified claim @@ -87,4 +92,4 @@ topologySpreadConstraints: # podDisruptionBudget: # maxUnavailable: 1 -nodeSelector: {} \ No newline at end of file +nodeSelector: {}