diff --git a/.github/data/matrix-smoke-oss.json b/.github/data/matrix-smoke-oss.json index c1491e62f8..a9e87bbb1d 100644 --- a/.github/data/matrix-smoke-oss.json +++ b/.github/data/matrix-smoke-oss.json @@ -39,14 +39,14 @@ "label": "policies 1/2", "image": "alpine", "type": "oss", - "marker": "'policies and not policies_rl and not policies_ac and not policies_jwt and not policies_mtls'", + "marker": "'policies and not policies_rl and not policies_ac and not policies_jwt and not policies_mtls and not policies_cache'", "platforms": "linux/arm64, linux/amd64" }, { "label": "policies 2/2", "image": "alpine", "type": "oss", - "marker": "'policies_rl or policies_ac or policies_jwt or policies_mtls or otel'", + "marker": "'policies_rl or policies_ac or policies_jwt or policies_mtls or policies_cache or otel'", "platforms": "linux/arm64, linux/amd64" }, { diff --git a/.github/data/matrix-smoke-plus.json b/.github/data/matrix-smoke-plus.json index 19f7cc1672..3750017dfd 100644 --- a/.github/data/matrix-smoke-plus.json +++ b/.github/data/matrix-smoke-plus.json @@ -67,7 +67,7 @@ "label": "policies 1/3", "image": "ubi-9-plus", "type": "plus", - "marker": "'policies and not policies_ac and not policies_jwt and not policies_mtls and not policies_rl'", + "marker": "'policies and not policies_ac and not policies_jwt and not policies_mtls and not policies_rl and not policies_cache'", "platforms": "linux/arm64, linux/amd64" }, { @@ -81,7 +81,7 @@ "label": "policies 3/3", "image": "ubi-9-plus", "type": "plus", - "marker": "policies_rl", + "marker": "'policies_rl or policies_cache'", "platforms": "linux/arm64, linux/amd64" }, { diff --git a/charts/nginx-ingress/templates/_helpers.tpl b/charts/nginx-ingress/templates/_helpers.tpl index c1700c9fa1..fa964b30b6 100644 --- a/charts/nginx-ingress/templates/_helpers.tpl +++ b/charts/nginx-ingress/templates/_helpers.tpl @@ -352,14 +352,24 @@ List of volumes for controller. {{- if eq (include "nginx-ingress.readOnlyRootFilesystem" .) "true" }} - name: nginx-etc emptyDir: {} +{{- if .Values.controller.cache.enableShared }} +- name: nginx-cache + persistentVolumeClaim: + claimName: {{ .Values.controller.cache.sharedPVCName }} +{{- else }} - name: nginx-cache emptyDir: {} +{{- end }} - name: nginx-lib emptyDir: {} - name: nginx-state emptyDir: {} - name: nginx-log emptyDir: {} +{{- else if .Values.controller.cache.enableShared }} +- name: nginx-cache + persistentVolumeClaim: + claimName: {{ .Values.controller.cache.sharedPVCName }} {{- end }} {{- if .Values.controller.appprotect.v5 }} {{ toYaml .Values.controller.appprotect.volumes }} @@ -419,6 +429,9 @@ volumeMounts: name: nginx-state - mountPath: /var/log/nginx name: nginx-log +{{- else if .Values.controller.cache.enableShared }} +- mountPath: /var/cache/nginx + name: nginx-cache {{- end }} {{- if .Values.controller.appprotect.v5 }} - name: app-protect-bd-config diff --git a/charts/nginx-ingress/templates/controller-service.yaml b/charts/nginx-ingress/templates/controller-service.yaml index 0073813227..2e3c0a186d 100644 --- a/charts/nginx-ingress/templates/controller-service.yaml +++ b/charts/nginx-ingress/templates/controller-service.yaml @@ -65,6 +65,14 @@ spec: {{- end }} selector: {{- include "nginx-ingress.selectorLabels" . 
| nindent 4 }} + {{- if .Values.controller.service.sessionAffinity.enable }} + sessionAffinity: {{ .Values.controller.service.sessionAffinity.type }} + {{- if eq .Values.controller.service.sessionAffinity.type "ClientIP" }} + sessionAffinityConfig: + clientIP: + timeoutSeconds: {{ .Values.controller.service.sessionAffinity.timeoutSeconds }} + {{- end }} + {{- end }} {{- if .Values.controller.service.externalIPs }} externalIPs: {{ toYaml .Values.controller.service.externalIPs | indent 4 }} diff --git a/charts/nginx-ingress/values.schema.json b/charts/nginx-ingress/values.schema.json index 2acdefa40d..4a4b3ece9d 100644 --- a/charts/nginx-ingress/values.schema.json +++ b/charts/nginx-ingress/values.schema.json @@ -1455,6 +1455,50 @@ "type": "object", "ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.33.1/_definitions.json#/definitions/io.k8s.api.core.v1.ServicePort" } + }, + "sessionAffinity": { + "type": "object", + "default": {}, + "title": "The sessionAffinity Schema", + "required": [], + "properties": { + "enable": { + "type": "boolean", + "default": false, + "title": "Enable session affinity", + "examples": [ + false + ] + }, + "type": { + "type": "string", + "default": "ClientIP", + "title": "Session affinity type", + "enum": [ + "ClientIP" + ], + "examples": [ + "ClientIP" + ] + }, + "timeoutSeconds": { + "type": "integer", + "default": 3600, + "title": "Session affinity timeout in seconds", + "minimum": 1, + "maximum": 86400, + "examples": [ + 3600 + ] + } + }, + "examples": [ + { + "enable": false, + "type": "ClientIP", + "timeoutSeconds": 3600 + } + ] } }, "examples": [ @@ -1483,7 +1527,12 @@ "targetPort": 443, "name": "https" }, - "customPorts": [] + "customPorts": [], + "sessionAffinity": { + "enable": false, + "type": "ClientIP", + "timeoutSeconds": 3600 + } } ] }, diff --git a/charts/nginx-ingress/values.yaml b/charts/nginx-ingress/values.yaml index 8dc7579c25..29f4cd5fa7 100644 --- a/charts/nginx-ingress/values.yaml +++ b/charts/nginx-ingress/values.yaml @@ -164,6 +164,16 @@ controller: ## Sets the log format of Ingress Controller. Options include: glog, json, text logFormat: glog + ## Cache configuration options + cache: + ## Enables shared cache across multiple pods using an external persistent volume + ## When enabled, the /var/cache/nginx directory will be mounted from a PVC instead of using emptyDir + ## User must create and configure a PVC with appropriate access mode + enableShared: false + + ## The name of the PersistentVolumeClaim to use for shared cache, should match the name of the PVC created by the user + sharedPVCName: "nginx-shared-cache" + ## A list of custom ports to expose on the NGINX Ingress Controller pod. Follows the conventional Kubernetes yaml syntax for container ports. customPorts: [] @@ -502,6 +512,15 @@ controller: ## A list of custom ports to expose through the Ingress Controller service. Follows the conventional Kubernetes yaml syntax for service ports. customPorts: [] + ## Session affinity configuration for the Ingress Controller service, ensures requests from the same client IP go to the same pod + sessionAffinity: + ## Enable session affinity. Valid values: None, ClientIP + enable: false + ## Session affinity type. Currently only ClientIP is supported. + type: ClientIP + ## Session affinity timeout in seconds (default: 3600 = 1 hour) + timeoutSeconds: 3600 + serviceAccount: ## The annotations of the service account of the Ingress Controller pods. 
annotations: {} diff --git a/config/crd/bases/k8s.nginx.org_policies.yaml b/config/crd/bases/k8s.nginx.org_policies.yaml index 32ce353821..1ba74d4ecf 100644 --- a/config/crd/bases/k8s.nginx.org_policies.yaml +++ b/config/crd/bases/k8s.nginx.org_policies.yaml @@ -109,6 +109,92 @@ spec: otherwise the secret will be rejected as invalid. type: string type: object + cache: + description: The Cache Key defines a cache policy for proxy caching + properties: + allowedCodes: + description: |- + AllowedCodes defines which HTTP response codes should be cached. + Accepts either: + - The string "any" to cache all response codes (must be the only element) + - A list of HTTP status codes as integers (100-599) + Examples: ["any"], [200, 301, 404], [200]. + Invalid: ["any", 200] (cannot mix "any" with specific codes). + items: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: array + allowedMethods: + description: |- + AllowedMethods defines which HTTP methods should be cached. + Only "GET", "HEAD", and "POST" are supported by NGINX proxy_cache_methods directive. + GET and HEAD are always cached by default even if not specified. + Maximum of 3 items allowed. Examples: ["GET"], ["GET", "HEAD", "POST"]. + Invalid methods: PUT, DELETE, PATCH, etc. + items: + type: string + maxItems: 3 + type: array + x-kubernetes-validations: + - message: 'allowed methods must be one of: GET, HEAD, POST' + rule: self.all(method, method in ['GET', 'HEAD', 'POST']) + cachePurgeAllow: + description: |- + CachePurgeAllow defines IP addresses or CIDR blocks allowed to purge cache. + This feature is only available in NGINX Plus. + Examples: ["192.168.1.100", "10.0.0.0/8", "::1"]. + Invalid in NGINX OSS (will be ignored). + items: + type: string + type: array + cacheZoneName: + description: |- + CacheZoneName defines the name of the cache zone. Must start with a lowercase letter, + followed by alphanumeric characters or underscores, and end with an alphanumeric character. + Single lowercase letters are also allowed. Examples: "cache", "my_cache", "cache1". + pattern: ^[a-z][a-zA-Z0-9_]*[a-zA-Z0-9]$|^[a-z]$ + type: string + cacheZoneSize: + description: |- + CacheZoneSize defines the size of the cache zone. Must be a number followed by a size unit: + 'k' for kilobytes, 'm' for megabytes, or 'g' for gigabytes. + Examples: "10m", "1g", "512k". + pattern: ^[0-9]+[kmg]$ + type: string + levels: + description: |- + Levels defines the cache directory hierarchy levels for storing cached files. + Must be in format "X:Y" or "X:Y:Z" where X, Y, Z are either 1 or 2. + This controls the number of subdirectory levels and their name lengths. + Examples: "1:2", "2:2", "1:2:2". + Invalid: "3:1", "1:3", "1:2:3". + pattern: ^[12](?::[12]){0,2}$ + type: string + overrideUpstreamCache: + default: false + description: |- + OverrideUpstreamCache controls whether to override upstream cache headers + (using proxy_ignore_headers directive). When true, NGINX will ignore + cache-related headers from upstream servers like Cache-Control, Expires, etc. + Default: false. + type: boolean + time: + description: |- + Time defines the default cache time. Required when allowedCodes is specified. + Must be a number followed by a time unit: + 's' for seconds, 'm' for minutes, 'h' for hours, 'd' for days. + Examples: "30s", "5m", "1h", "2d". 
+ pattern: ^[0-9]+[smhd]$ + type: string + required: + - cacheZoneName + - cacheZoneSize + type: object + x-kubernetes-validations: + - message: time is required when allowedCodes is specified + rule: '!has(self.allowedCodes) || (has(self.allowedCodes) && has(self.time))' egressMTLS: description: The EgressMTLS policy configures upstreams authentication and certificate verification. diff --git a/deploy/crds.yaml b/deploy/crds.yaml index 5b80d9dff9..1518d090b0 100644 --- a/deploy/crds.yaml +++ b/deploy/crds.yaml @@ -280,6 +280,92 @@ spec: otherwise the secret will be rejected as invalid. type: string type: object + cache: + description: The Cache Key defines a cache policy for proxy caching + properties: + allowedCodes: + description: |- + AllowedCodes defines which HTTP response codes should be cached. + Accepts either: + - The string "any" to cache all response codes (must be the only element) + - A list of HTTP status codes as integers (100-599) + Examples: ["any"], [200, 301, 404], [200]. + Invalid: ["any", 200] (cannot mix "any" with specific codes). + items: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: array + allowedMethods: + description: |- + AllowedMethods defines which HTTP methods should be cached. + Only "GET", "HEAD", and "POST" are supported by NGINX proxy_cache_methods directive. + GET and HEAD are always cached by default even if not specified. + Maximum of 3 items allowed. Examples: ["GET"], ["GET", "HEAD", "POST"]. + Invalid methods: PUT, DELETE, PATCH, etc. + items: + type: string + maxItems: 3 + type: array + x-kubernetes-validations: + - message: 'allowed methods must be one of: GET, HEAD, POST' + rule: self.all(method, method in ['GET', 'HEAD', 'POST']) + cachePurgeAllow: + description: |- + CachePurgeAllow defines IP addresses or CIDR blocks allowed to purge cache. + This feature is only available in NGINX Plus. + Examples: ["192.168.1.100", "10.0.0.0/8", "::1"]. + Invalid in NGINX OSS (will be ignored). + items: + type: string + type: array + cacheZoneName: + description: |- + CacheZoneName defines the name of the cache zone. Must start with a lowercase letter, + followed by alphanumeric characters or underscores, and end with an alphanumeric character. + Single lowercase letters are also allowed. Examples: "cache", "my_cache", "cache1". + pattern: ^[a-z][a-zA-Z0-9_]*[a-zA-Z0-9]$|^[a-z]$ + type: string + cacheZoneSize: + description: |- + CacheZoneSize defines the size of the cache zone. Must be a number followed by a size unit: + 'k' for kilobytes, 'm' for megabytes, or 'g' for gigabytes. + Examples: "10m", "1g", "512k". + pattern: ^[0-9]+[kmg]$ + type: string + levels: + description: |- + Levels defines the cache directory hierarchy levels for storing cached files. + Must be in format "X:Y" or "X:Y:Z" where X, Y, Z are either 1 or 2. + This controls the number of subdirectory levels and their name lengths. + Examples: "1:2", "2:2", "1:2:2". + Invalid: "3:1", "1:3", "1:2:3". + pattern: ^[12](?::[12]){0,2}$ + type: string + overrideUpstreamCache: + default: false + description: |- + OverrideUpstreamCache controls whether to override upstream cache headers + (using proxy_ignore_headers directive). When true, NGINX will ignore + cache-related headers from upstream servers like Cache-Control, Expires, etc. + Default: false. + type: boolean + time: + description: |- + Time defines the default cache time. Required when allowedCodes is specified. 
+ Must be a number followed by a time unit: + 's' for seconds, 'm' for minutes, 'h' for hours, 'd' for days. + Examples: "30s", "5m", "1h", "2d". + pattern: ^[0-9]+[smhd]$ + type: string + required: + - cacheZoneName + - cacheZoneSize + type: object + x-kubernetes-validations: + - message: time is required when allowedCodes is specified + rule: '!has(self.allowedCodes) || (has(self.allowedCodes) && has(self.time))' egressMTLS: description: The EgressMTLS policy configures upstreams authentication and certificate verification. diff --git a/docs/crd/k8s.nginx.org_policies.md b/docs/crd/k8s.nginx.org_policies.md index d8c8bb0f1c..cb37a32ae4 100644 --- a/docs/crd/k8s.nginx.org_policies.md +++ b/docs/crd/k8s.nginx.org_policies.md @@ -26,6 +26,15 @@ The `.spec` object supports the following fields: | `basicAuth` | `object` | The basic auth policy configures NGINX to authenticate client requests using HTTP Basic authentication credentials. | | `basicAuth.realm` | `string` | The realm for the basic authentication. | | `basicAuth.secret` | `string` | The name of the Kubernetes secret that stores the Htpasswd configuration. It must be in the same namespace as the Policy resource. The secret must be of the type nginx.org/htpasswd, and the config must be stored in the secret under the key htpasswd, otherwise the secret will be rejected as invalid. | +| `cache` | `object` | The Cache Key defines a cache policy for proxy caching | +| `cache.allowedCodes` | `array` | AllowedCodes defines which HTTP response codes should be cached. Accepts either: - The string "any" to cache all response codes (must be the only element) - A list of HTTP status codes as integers (100-599) Examples: ["any"], [200, 301, 404], [200]. Invalid: ["any", 200] (cannot mix "any" with specific codes). | +| `cache.allowedMethods` | `array[string]` | AllowedMethods defines which HTTP methods should be cached. Only "GET", "HEAD", and "POST" are supported by NGINX proxy_cache_methods directive. GET and HEAD are always cached by default even if not specified. Maximum of 3 items allowed. Examples: ["GET"], ["GET", "HEAD", "POST"]. Invalid methods: PUT, DELETE, PATCH, etc. | +| `cache.cachePurgeAllow` | `array[string]` | CachePurgeAllow defines IP addresses or CIDR blocks allowed to purge cache. This feature is only available in NGINX Plus. Examples: ["192.168.1.100", "10.0.0.0/8", "::1"]. Invalid in NGINX OSS (will be ignored). | +| `cache.cacheZoneName` | `string` | CacheZoneName defines the name of the cache zone. Must start with a lowercase letter, followed by alphanumeric characters or underscores, and end with an alphanumeric character. Single lowercase letters are also allowed. Examples: "cache", "my_cache", "cache1". | +| `cache.cacheZoneSize` | `string` | CacheZoneSize defines the size of the cache zone. Must be a number followed by a size unit: 'k' for kilobytes, 'm' for megabytes, or 'g' for gigabytes. Examples: "10m", "1g", "512k". | +| `cache.levels` | `string` | Levels defines the cache directory hierarchy levels for storing cached files. Must be in format "X:Y" or "X:Y:Z" where X, Y, Z are either 1 or 2. This controls the number of subdirectory levels and their name lengths. Examples: "1:2", "2:2", "1:2:2". Invalid: "3:1", "1:3", "1:2:3". | +| `cache.overrideUpstreamCache` | `boolean` | OverrideUpstreamCache controls whether to override upstream cache headers (using proxy_ignore_headers directive). When true, NGINX will ignore cache-related headers from upstream servers like Cache-Control, Expires, etc. Default: false. 
| +| `cache.time` | `string` | Time defines the default cache time. Required when allowedCodes is specified. Must be a number followed by a time unit: 's' for seconds, 'm' for minutes, 'h' for hours, 'd' for days. Examples: "30s", "5m", "1h", "2d". | | `egressMTLS` | `object` | The EgressMTLS policy configures upstreams authentication and certificate verification. | | `egressMTLS.ciphers` | `string` | Specifies the enabled ciphers for requests to an upstream HTTPS server. The default is DEFAULT. | | `egressMTLS.protocols` | `string` | Specifies the protocols for requests to an upstream HTTPS server. The default is TLSv1 TLSv1.1 TLSv1.2. | diff --git a/examples/custom-resources/cache-policy/README.md b/examples/custom-resources/cache-policy/README.md new file mode 100644 index 0000000000..d3082699a5 --- /dev/null +++ b/examples/custom-resources/cache-policy/README.md @@ -0,0 +1,230 @@ +# Cache Policy + +In this example, we deploy a web application, configure load balancing for it via a VirtualServer, and apply a cache +policy to improve performance by caching responses. + +## Prerequisites + +1. Follow the [installation](https://docs.nginx.com/nginx-ingress-controller/installation/) instructions to deploy the Ingress Controller. +1. Make sure the snippets are enabled (this is only required for this example as we can see the `X-Cache-Status` header in the response, not required for functionality). +1. Save the public IP address of the Ingress Controller into a shell variable: + + ```shell + IC_IP=XXX.YYY.ZZZ.III + ``` + +1. Save the HTTPS port of the Ingress Controller into a shell variable: + + ```shell + IC_HTTPS_PORT= + ``` + +## Step 1 - Deploy a Web Application + +Create the application deployment and service: + +```shell +kubectl apply -f cafe.yaml +``` + +## Step 2 - Create the TLS Secret + +Create a secret with the TLS certificate and key: + +```shell +kubectl apply -f cafe-secret.yaml +``` + +## Step 3 - Deploy the Cache Policy + +In this step, we create a policy with the name `cache-policy` that configures NGINX to cache responses for 30 minutes. + +Create the cache policy: + +```shell +kubectl apply -f cache.yaml +``` + +This policy configures: + +- A cache zone named `testcache` with a size of 15MB +- Caching for any response codes using `allowedCodes: ["any"]` +- Caching for GET, HEAD, and POST methods +- Cache duration of 30 minutes +- Override upstream cache headers with `overrideUpstreamCache: true`, to ignore upstream cache headers + +## Step 4 - Configure Load Balancing + +Create a VirtualServer resource for the web application: + +```shell +kubectl apply -f cafe-virtual-server.yaml +``` + +Note that the VirtualServer: + +- References the policy `cache-policy` created in Step 3 +- Includes a server snippet to add the `X-Cache-Status` header to responses +- This header shows whether responses are served from cache (HIT) or fetched from upstream (MISS) + +## Step 5 - Test the Configuration + +Let's test the caching behavior by making multiple requests to the same endpoint. 
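+
+Before sending requests, you can optionally confirm that the Policy and VirtualServer created above were accepted by the Ingress Controller. This is an optional, illustrative check using standard `kubectl` commands; the exact columns in the output depend on your controller and CRD versions:
+
+```shell
+# Both resources should report a valid/accepted state before testing
+kubectl get policy cache-policy
+kubectl get virtualserver cafe
+```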
+ +### Test Cache MISS (First Request) + +Make the first request to the `/coffee` endpoint: + +```shell +curl --resolve cafe.example.com:$IC_HTTPS_PORT:$IC_IP https://cafe.example.com:$IC_HTTPS_PORT/coffee -I --insecure + +HTTP/1.1 200 OK +Server: nginx/1.27.4 +Date: Wed, 13 Aug 2025 12:11:34 GMT +Content-Type: text/plain +Content-Length: 160 +Connection: keep-alive +Expires: Wed, 13 Aug 2025 12:11:33 GMT +Cache-Control: no-cache +X-Cache-Status: MISS +``` + +The `X-Cache-Status: MISS` header indicates this response was fetched from the upstream server. The response is now cached. + +### Test Cache HIT (Subsequent Requests) + +Make the same request again within the cache duration: + +```shell +curl --resolve cafe.example.com:$IC_HTTPS_PORT:$IC_IP https://cafe.example.com:$IC_HTTPS_PORT/coffee -I --insecure + +HTTP/1.1 200 OK +Server: nginx/1.27.4 +Date: Wed, 13 Aug 2025 12:13:00 GMT +Content-Type: text/plain +Content-Length: 160 +Connection: keep-alive +Expires: Wed, 13 Aug 2025 12:11:33 GMT +Cache-Control: no-cache +X-Cache-Status: HIT +``` + +The `X-Cache-Status: HIT` header indicates this response was served from the cache, providing faster response times. + +### Test with Request ID for Full Response + +You can also view the full response to see the Request ID: + +```shell +curl --resolve cafe.example.com:$IC_HTTPS_PORT:$IC_IP https://cafe.example.com:$IC_HTTPS_PORT/coffee --insecure + +Server address: 10.0.0.215:8080 +Server name: coffee-676c9f8944-bhvxw +Date: 13/Aug/2025:12:11:34 +0000 +URI: /coffee +Request ID: c0ca10182c70590112c622835dd060f2 +``` + +```shell +curl --resolve cafe.example.com:$IC_HTTPS_PORT:$IC_IP https://cafe.example.com:$IC_HTTPS_PORT/coffee --insecure + +Server address: 10.0.0.215:8080 +Server name: coffee-676c9f8944-bhvxw +Date: 13/Aug/2025:12:11:34 +0000 +URI: /coffee +Request ID: c0ca10182c70590112c622835dd060f2 +``` + +When you make the same request again (while it's still cached), you'll get the same cached response with the same Request ID. + +### Test Different Endpoints + +Test the `/tea` endpoint to see cache behavior for different URLs: + +```shell +curl --resolve cafe.example.com:$IC_HTTPS_PORT:$IC_IP https://cafe.example.com:$IC_HTTPS_PORT/tea -I --insecure + +HTTP/1.1 200 OK +Server: nginx/1.27.4 +Date: Wed, 13 Aug 2025 12:16:16 GMT +Content-Type: text/plain +Content-Length: 154 +Connection: keep-alive +Expires: Wed, 13 Aug 2025 12:16:15 GMT +Cache-Control: no-cache +X-Cache-Status: MISS + +``` + +Each unique URL has its own cache entry, so the first request to `/tea` will show `MISS` even if `/coffee` is already cached. + +## Cache Configuration + +The cache policy supports additional configuration options: + +### Cache Purging (NGINX Plus Only) + +For NGINX Plus deployments, you can enable cache purging by adding IP addresses or CIDR ranges to the `cachePurgeAllow` field: + +```yaml +spec: + cache: + cacheZoneName: "testcache" + cacheZoneSize: "15m" + cachePurgeAllow: ["192.168.1.0/24", "10.0.0.1"] + # ... 
other configuration
+```
+
+With purging enabled, the following requests show the full flow: a cached response (`HIT`), a `PURGE` request that removes the entry, and a subsequent request that is fetched from the upstream again (`MISS`):
+
+```shell
+curl --resolve cafe.example.com:$IC_HTTPS_PORT:$IC_IP https://cafe.example.com:$IC_HTTPS_PORT/coffee -I --insecure
+
+HTTP/1.1 200 OK
+Server: nginx/1.27.4
+Date: Wed, 13 Aug 2025 12:22:07 GMT
+Content-Type: text/plain
+Content-Length: 160
+Connection: keep-alive
+Expires: Wed, 13 Aug 2025 12:19:29 GMT
+Cache-Control: no-cache
+X-Cache-Status: HIT
+```
+
+```shell
+curl --resolve cafe.example.com:$IC_HTTPS_PORT:$IC_IP https://cafe.example.com:$IC_HTTPS_PORT/coffee -I -X PURGE --insecure
+
+HTTP/1.1 204 No Content
+Server: nginx/1.27.4
+Date: Wed, 13 Aug 2025 12:22:39 GMT
+Connection: keep-alive
+```
+
+```shell
+curl --resolve cafe.example.com:$IC_HTTPS_PORT:$IC_IP https://cafe.example.com:$IC_HTTPS_PORT/coffee -I --insecure
+
+HTTP/1.1 200 OK
+Server: nginx/1.27.4
+Date: Wed, 13 Aug 2025 12:22:51 GMT
+Content-Type: text/plain
+Content-Length: 160
+Connection: keep-alive
+Expires: Wed, 13 Aug 2025 12:22:50 GMT
+Cache-Control: no-cache
+X-Cache-Status: MISS
+```
+
+This allows authorized clients to purge cached content using the PURGE HTTP method.
+
+### Specific Response Codes
+
+Instead of caching all response codes with `["any"]`, you can specify particular codes:
+
+```yaml
+spec:
+  cache:
+    cacheZoneName: "testcache"
+    cacheZoneSize: "15m"
+    allowedCodes: [200, 301, 404]
+    # ... other configuration
+```
+
+This configuration only caches responses with 200, 301, or 404 status codes.
diff --git a/examples/custom-resources/cache-policy/cache.yaml b/examples/custom-resources/cache-policy/cache.yaml
new file mode 100644
index 0000000000..807dfb9194
--- /dev/null
+++ b/examples/custom-resources/cache-policy/cache.yaml
@@ -0,0 +1,14 @@
+apiVersion: k8s.nginx.org/v1
+kind: Policy
+metadata:
+  name: cache-policy
+spec:
+  cache:
+    cacheZoneName: "testcache" # Required
+    cacheZoneSize: "15m" # Required
+    allowedCodes: ["any"] # Optional. Either ["any"] or a list of codes such as [200, 301, ...]; "any" cannot be combined with specific codes
+    allowedMethods: ["GET", "HEAD", "POST"] # Optional
+    time: "30m" # Optional, e.g. "15m", "1h", "2d". Default is "10m"
+    overrideUpstreamCache: true # Optional, default is false
+    # levels: "1:2" # Optional. If omitted, no levels= parameter is set; see https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path for more details
+    # cachePurgeAllow: [""] # Optional. If set, allows cache purging from the specified IPs or CIDR ranges. NGINX Plus only.
diff --git a/examples/custom-resources/cache-policy/cafe-secret.yaml b/examples/custom-resources/cache-policy/cafe-secret.yaml
new file mode 120000
index 0000000000..efa8919b4b
--- /dev/null
+++ b/examples/custom-resources/cache-policy/cafe-secret.yaml
@@ -0,0 +1 @@
+../../common-secrets/cafe-secret-cafe.example.com.yaml
\ No newline at end of file
diff --git a/examples/custom-resources/cache-policy/cafe-virtual-server.yaml b/examples/custom-resources/cache-policy/cafe-virtual-server.yaml
new file mode 100644
index 0000000000..8042866050
--- /dev/null
+++ b/examples/custom-resources/cache-policy/cafe-virtual-server.yaml
@@ -0,0 +1,28 @@
+apiVersion: k8s.nginx.org/v1
+kind: VirtualServer
+metadata:
+  name: cafe
+spec:
+  server-snippets: |
+    add_header X-Cache-Status $upstream_cache_status;
+    # This header shows the cache status for each request, e.g. X-Cache-Status: MISS or X-Cache-Status: HIT.
+    # The cache status can be "HIT", "MISS", "EXPIRED", etc.
+ policies: + - name: cache-policy + host: cafe.example.com + tls: + secret: cafe-secret + upstreams: + - name: tea + service: tea-svc + port: 80 + - name: coffee + service: coffee-svc + port: 80 + routes: + - path: /tea + action: + pass: tea + - path: /coffee + action: + pass: coffee diff --git a/examples/custom-resources/cache-policy/cafe.yaml b/examples/custom-resources/cache-policy/cafe.yaml new file mode 100644 index 0000000000..f049e8bf29 --- /dev/null +++ b/examples/custom-resources/cache-policy/cafe.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coffee +spec: + replicas: 2 + selector: + matchLabels: + app: coffee + template: + metadata: + labels: + app: coffee + spec: + containers: + - name: coffee + image: nginxdemos/nginx-hello:plain-text + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: coffee-svc +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: coffee +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tea +spec: + replicas: 1 + selector: + matchLabels: + app: tea + template: + metadata: + labels: + app: tea + spec: + containers: + - name: tea + image: nginxdemos/nginx-hello:plain-text + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: tea-svc +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: tea diff --git a/internal/configs/version2/__snapshots__/templates_test.snap b/internal/configs/version2/__snapshots__/templates_test.snap index c7dc116be4..9f4c43c227 100644 --- a/internal/configs/version2/__snapshots__/templates_test.snap +++ b/internal/configs/version2/__snapshots__/templates_test.snap @@ -3493,3 +3493,146 @@ server { } --- + +[TestExecuteVirtualServerTemplateWithCachePolicyOSS - 1] + +upstream test-upstream {zone test-upstream ; + server 10.0.0.20:8001 max_fails=0 fail_timeout= max_conns=0; +} + +proxy_cache_path /var/cache/nginx/test_cache_basic_cache levels=1:2 keys_zone=test_cache_basic_cache:10m; +proxy_cache_path /var/cache/nginx/test_cache_location_simple_cache keys_zone=test_cache_location_simple_cache:5m; +server { + listen 80; + listen [::]:80; + + + server_name example.com; + + set $resource_type "virtualserver"; + set $resource_name ""; + set $resource_namespace ""; + + server_tokens "off"; + # Server-level cache configuration + proxy_cache test_cache_basic_cache; + proxy_cache_key $scheme$proxy_host$request_uri; + proxy_ignore_headers Cache-Control Expires Set-Cookie Vary X-Accel-Expires; + proxy_cache_valid any 1h; + proxy_cache_methods GET HEAD; + + + + + location / { + set $service ""; + + + set $default_connection_header close; + proxy_connect_timeout ; + proxy_read_timeout ; + proxy_send_timeout ; + client_max_body_size ; + + proxy_buffering off; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $vs_connection_header; + proxy_pass_request_headers off; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache test_cache_location_simple_cache; + proxy_cache_key $scheme$proxy_host$request_uri; + proxy_cache_valid 200 30m; + proxy_cache_valid 404 30m; + proxy_pass http://test-upstream; + proxy_next_upstream ; + proxy_next_upstream_timeout ; + proxy_next_upstream_tries 0; + } +} + +--- + 
+[TestExecuteVirtualServerTemplateWithCachePolicyNGINXPlus - 1] + +upstream test-upstream { + zone test-upstream ; + server 10.0.0.20:8001 max_fails=0 fail_timeout= max_conns=0; +} + +proxy_cache_path /var/cache/nginx/test_cache_full_advanced levels=2:2 keys_zone=test_cache_full_advanced:50m; +proxy_cache_path /var/cache/nginx/test_cache_location_location_cache keys_zone=test_cache_location_location_cache:20m; +geo $purge_allowed_test_cache_full_advanced { + default 0; + 127.0.0.1 1; + 10.0.0.0/8 1; + 192.168.1.0/24 1; +} + +map $request_method $cache_purge_test_cache_full_advanced { + PURGE $purge_allowed_test_cache_full_advanced; + default 0; +} + +server { + listen 80; + listen [::]:80; + + + server_name example.com; + status_zone example.com; + set $resource_type "virtualserver"; + set $resource_name ""; + set $resource_namespace ""; + + server_tokens "off"; + # Server-level cache configuration + proxy_cache test_cache_full_advanced; + proxy_cache_key $scheme$proxy_host$request_uri; + proxy_ignore_headers Cache-Control Expires Set-Cookie Vary X-Accel-Expires; + proxy_cache_valid 200 2h; + proxy_cache_valid 301 2h; + proxy_cache_valid 404 2h; + proxy_cache_methods GET HEAD POST; + proxy_cache_purge $cache_purge_test_cache_full_advanced; + + + + + location / { + set $service ""; + status_zone ""; + + + set $default_connection_header close; + proxy_connect_timeout ; + proxy_read_timeout ; + proxy_send_timeout ; + client_max_body_size ; + + proxy_buffering off; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $vs_connection_header; + proxy_pass_request_headers off; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache test_cache_location_location_cache; + proxy_cache_key $scheme$proxy_host$request_uri; + proxy_cache_valid any 1h; + proxy_cache_methods GET HEAD; + proxy_pass http://test-upstream; + proxy_next_upstream ; + proxy_next_upstream_timeout ; + proxy_next_upstream_tries 0; + } +} + +--- diff --git a/internal/configs/version2/http.go b/internal/configs/version2/http.go index f835656fb9..0f412d7595 100644 --- a/internal/configs/version2/http.go +++ b/internal/configs/version2/http.go @@ -22,6 +22,7 @@ type VirtualServerConfig struct { LimitReqZones []LimitReqZone Maps []Map AuthJWTClaimSets []AuthJWTClaimSet + CacheZones []CacheZone Server Server SpiffeCerts bool SpiffeClientCerts bool @@ -102,6 +103,7 @@ type Server struct { APIKeyEnabled bool WAF *WAF Dos *Dos + Cache *Cache PoliciesErrorReturn *Return VSNamespace string VSName string @@ -228,6 +230,7 @@ type Location struct { WAF *WAF Dos *Dos PoliciesErrorReturn *Return + Cache *Cache ServiceName string IsVSR bool VSRName string @@ -480,3 +483,23 @@ type Variable struct { Name string Value string } + +// CacheZone defines a proxy cache zone configuration. +type CacheZone struct { + Name string + Size string + Path string + Levels string // Optional. Directory hierarchy for cache files (e.g., "1:2", "2:2", "1:2:2") +} + +// Cache defines cache configuration for locations. 
+type Cache struct { + ZoneName string + ZoneSize string + Time string + Valid map[string]string // map for codes to time + AllowedMethods []string // HTTP methods allowed for caching based on proxy_cache_methods + CachePurgeAllow []string // IPs/CIDRs allowed to purge cache + OverrideUpstreamCache bool // Controls whether to override upstream cache headers + Levels string // Optional. Directory hierarchy for cache files (e.g., "1:2", "2:2", "1:2:2") +} diff --git a/internal/configs/version2/nginx-plus.virtualserver.tmpl b/internal/configs/version2/nginx-plus.virtualserver.tmpl index 90f264ded7..b9a097bc4e 100644 --- a/internal/configs/version2/nginx-plus.virtualserver.tmpl +++ b/internal/configs/version2/nginx-plus.virtualserver.tmpl @@ -70,6 +70,10 @@ map {{ $m.Source }} {{ $m.Variable }} { limit_req_zone {{ $z.Key }} zone={{ $z.ZoneName }}:{{ $z.ZoneSize }} rate={{ $z.Rate }}{{- if $z.Sync }} sync{{- end }}; {{- end }} +{{- range $c := .CacheZones }} +proxy_cache_path {{ $c.Path }}{{ if $c.Levels }} levels={{ $c.Levels }}{{ end }} keys_zone={{ $c.Name }}:{{ $c.Size }}; +{{- end }} + {{- range $m := .StatusMatches }} match {{ $m.Name }} { status {{ $m.Code }}; @@ -78,6 +82,39 @@ match {{ $m.Name }} { {{- $s := .Server }} +{{- /* Generate cache-zone-specific purge configuration with VirtualServer isolation */ -}} +{{- /* Check server-level cache purge restrictions */ -}} +{{- if and $s.Cache (gt (len $s.Cache.CachePurgeAllow) 0) }} +geo $purge_allowed_{{ replaceAll $s.Cache.ZoneName "-" "_" }} { + default 0; +{{- range $ip := $s.Cache.CachePurgeAllow }} + {{ $ip }} 1; +{{- end }} +} + +map $request_method $cache_purge_{{ replaceAll $s.Cache.ZoneName "-" "_" }} { + PURGE $purge_allowed_{{ replaceAll $s.Cache.ZoneName "-" "_" }}; + default 0; +} +{{- end }} + +{{- /* Check location-level cache purge restrictions */ -}} +{{- range $l := $s.Locations }} +{{- if and $l.Cache (gt (len $l.Cache.CachePurgeAllow) 0) }} +geo $purge_allowed_{{ replaceAll $l.Cache.ZoneName "-" "_" }} { + default 0; +{{- range $ip := $l.Cache.CachePurgeAllow }} + {{ $ip }} 1; +{{- end }} +} + +map $request_method $cache_purge_{{ replaceAll $l.Cache.ZoneName "-" "_" }} { + PURGE $purge_allowed_{{ replaceAll $l.Cache.ZoneName "-" "_" }}; + default 0; +} +{{- end }} +{{- end }} + {{- with $s.JWKSAuthEnabled }} proxy_cache_path /var/cache/nginx/jwks_uri_{{$s.VSName}} levels=1 keys_zone=jwks_uri_{{$s.VSName}}:1m max_size=10m; {{- end }} @@ -187,6 +224,27 @@ server { return {{ .Code }}; {{- end }} + {{- with $s.Cache }} + # Server-level cache configuration + proxy_cache {{ $s.Cache.ZoneName }}; + proxy_cache_key $scheme$proxy_host$request_uri; + {{- if $s.Cache.OverrideUpstreamCache }} + proxy_ignore_headers Cache-Control Expires Set-Cookie Vary X-Accel-Expires; + {{- end }} + {{- if and $s.Cache.Time (eq (len $s.Cache.Valid) 0) }} + proxy_cache_valid {{ $s.Cache.Time }}; + {{- end }} + {{- range $code, $time := $s.Cache.Valid }} + proxy_cache_valid {{ $code }} {{ $time }}; + {{- end }} + {{- if $s.Cache.AllowedMethods }} + proxy_cache_methods{{ range $s.Cache.AllowedMethods }} {{ . 
}}{{ end }}; + {{- end }} + {{- if gt (len $s.Cache.CachePurgeAllow) 0 }} + proxy_cache_purge $cache_purge_{{ replaceAll $s.Cache.ZoneName "-" "_" }}; + {{- end }} + {{- end }} + {{- range $allow := $s.Allow }} allow {{ $allow }}; {{- end }} @@ -672,6 +730,27 @@ server { {{ $proxyOrGRPC }}_ssl_verify_depth 25; {{ $proxyOrGRPC }}_ssl_name {{ $l.ProxySSLName }}; {{- end }} + + {{- with $l.Cache }} + proxy_cache {{ $l.Cache.ZoneName }}; + proxy_cache_key $scheme$proxy_host$request_uri; + {{- if $l.Cache.OverrideUpstreamCache }} + proxy_ignore_headers Cache-Control Expires Set-Cookie Vary X-Accel-Expires; + {{- end }} + {{- if and $l.Cache.Time (eq (len $l.Cache.Valid) 0) }} + proxy_cache_valid {{ $l.Cache.Time }}; + {{- end }} + {{- range $code, $time := $l.Cache.Valid }} + proxy_cache_valid {{ $code }} {{ $time }}; + {{- end }} + {{- if $l.Cache.AllowedMethods }} + proxy_cache_methods{{ range $l.Cache.AllowedMethods }} {{ . }}{{ end }}; + {{- end }} + {{- if gt (len $l.Cache.CachePurgeAllow) 0 }} + proxy_cache_purge $cache_purge_{{ replaceAll $l.Cache.ZoneName "-" "_" }}; + {{- end }} + {{- end }} + {{- if $l.GRPCPass }} grpc_pass {{ $l.GRPCPass }}; {{- else }} diff --git a/internal/configs/version2/nginx.virtualserver.tmpl b/internal/configs/version2/nginx.virtualserver.tmpl index 4721b3c879..bde55f4d9d 100644 --- a/internal/configs/version2/nginx.virtualserver.tmpl +++ b/internal/configs/version2/nginx.virtualserver.tmpl @@ -40,6 +40,10 @@ map {{ $m.Source }} {{ $m.Variable }} { limit_req_zone {{ $z.Key }} zone={{ $z.ZoneName }}:{{ $z.ZoneSize }} rate={{ $z.Rate }}; {{- end }} +{{- range $c := .CacheZones }} +proxy_cache_path {{ $c.Path }}{{ if $c.Levels }} levels={{ $c.Levels }}{{ end }} keys_zone={{ $c.Name }}:{{ $c.Size }}; +{{- end }} + {{- $s := .Server }} server { {{- if $s.Gunzip }} @@ -114,6 +118,24 @@ server { return {{ .Code }}; {{- end }} + {{- with $s.Cache }} + # Server-level cache configuration + proxy_cache {{ $s.Cache.ZoneName }}; + proxy_cache_key $scheme$proxy_host$request_uri; + {{- if $s.Cache.OverrideUpstreamCache }} + proxy_ignore_headers Cache-Control Expires Set-Cookie Vary X-Accel-Expires; + {{- end }} + {{- if and $s.Cache.Time (eq (len $s.Cache.Valid) 0) }} + proxy_cache_valid {{ $s.Cache.Time }}; + {{- end }} + {{- range $code, $time := $s.Cache.Valid }} + proxy_cache_valid {{ $code }} {{ $time }}; + {{- end }} + {{- if $s.Cache.AllowedMethods }} + proxy_cache_methods{{ range $s.Cache.AllowedMethods }} {{ . }}{{ end }}; + {{- end }} + {{- end }} + {{- range $allow := $s.Allow }} allow {{ $allow }}; {{- end }} @@ -412,6 +434,24 @@ server { {{ $proxyOrGRPC }}_ssl_verify_depth 25; {{ $proxyOrGRPC }}_ssl_name {{ $l.ProxySSLName }}; {{- end }} + + {{- with $l.Cache }} + proxy_cache {{ $l.Cache.ZoneName }}; + proxy_cache_key $scheme$proxy_host$request_uri; + {{- if $l.Cache.OverrideUpstreamCache }} + proxy_ignore_headers Cache-Control Expires Set-Cookie Vary X-Accel-Expires; + {{- end }} + {{- if and $l.Cache.Time (eq (len $l.Cache.Valid) 0) }} + proxy_cache_valid {{ $l.Cache.Time }}; + {{- end }} + {{- range $code, $time := $l.Cache.Valid }} + proxy_cache_valid {{ $code }} {{ $time }}; + {{- end }} + {{- if $l.Cache.AllowedMethods }} + proxy_cache_methods{{ range $l.Cache.AllowedMethods }} {{ . 
}}{{ end }}; + {{- end }} + {{- end }} + {{- if $l.GRPCPass }} grpc_pass {{ $l.GRPCPass }}; {{- else }} diff --git a/internal/configs/version2/templates_test.go b/internal/configs/version2/templates_test.go index beaa511dfa..d81455d032 100644 --- a/internal/configs/version2/templates_test.go +++ b/internal/configs/version2/templates_test.go @@ -875,6 +875,125 @@ func TestExecuteVirtualServerTemplateWithOIDCAndPKCEPolicyNGINXPlus(t *testing.T t.Log(string(got)) } +func TestExecuteVirtualServerTemplateWithCachePolicyNGINXPlus(t *testing.T) { + t.Parallel() + executor := newTmplExecutorNGINXPlus(t) + got, err := executor.ExecuteVirtualServerTemplate(&virtualServerCfgWithCachePolicyNGINXPlus) + if err != nil { + t.Error(err) + } + + // Check cache zone declaration + expectedCacheZone := "proxy_cache_path /var/cache/nginx/test_cache_full_advanced levels=2:2 keys_zone=test_cache_full_advanced:50m;" + if !bytes.Contains(got, []byte(expectedCacheZone)) { + t.Errorf("Expected cache zone declaration: %s", expectedCacheZone) + } + + // Check cache purge configuration for NGINX Plus + expectedPurgeGeo := "geo $purge_allowed_test_cache_full_advanced {" + if !bytes.Contains(got, []byte(expectedPurgeGeo)) { + t.Errorf("Expected purge geo block: %s", expectedPurgeGeo) + } + + expectedPurgeMap := "map $request_method $cache_purge_test_cache_full_advanced {" + if !bytes.Contains(got, []byte(expectedPurgeMap)) { + t.Errorf("Expected purge map block: %s", expectedPurgeMap) + } + + // Check server-level cache configuration + expectedServerCacheDirectives := []string{ + "proxy_cache test_cache_full_advanced;", + "proxy_cache_key $scheme$proxy_host$request_uri;", + "proxy_ignore_headers Cache-Control Expires Set-Cookie Vary X-Accel-Expires;", + "proxy_cache_valid 200 2h;", + "proxy_cache_valid 404 2h;", + "proxy_cache_valid 301 2h;", + "proxy_cache_methods GET HEAD POST;", + "proxy_cache_purge $cache_purge_test_cache_full_advanced;", + } + + for _, directive := range expectedServerCacheDirectives { + if !bytes.Contains(got, []byte(directive)) { + t.Errorf("Expected server cache directive: %s", directive) + } + } + + // Check location-level cache configuration + expectedLocationCacheDirectives := []string{ + "proxy_cache test_cache_location_location_cache;", + "proxy_cache_valid any 1h;", + "proxy_cache_methods GET HEAD;", + } + + for _, directive := range expectedLocationCacheDirectives { + if !bytes.Contains(got, []byte(directive)) { + t.Errorf("Expected location cache directive: %s", directive) + } + } + + snaps.MatchSnapshot(t, string(got)) + t.Log(string(got)) +} + +func TestExecuteVirtualServerTemplateWithCachePolicyOSS(t *testing.T) { + t.Parallel() + executor := newTmplExecutorNGINX(t) + got, err := executor.ExecuteVirtualServerTemplate(&virtualServerCfgWithCachePolicyOSS) + if err != nil { + t.Error(err) + } + + // Check cache zone declaration + expectedCacheZone := "proxy_cache_path /var/cache/nginx/test_cache_basic_cache levels=1:2 keys_zone=test_cache_basic_cache:10m;" + if !bytes.Contains(got, []byte(expectedCacheZone)) { + t.Errorf("Expected cache zone declaration: %s", expectedCacheZone) + } + + // Ensure no purge configuration for OSS (cachePurgeAllow should be ignored) + if bytes.Contains(got, []byte("geo $purge_allowed")) { + t.Error("OSS template should not contain cache purge geo blocks") + } + + if bytes.Contains(got, []byte("map $request_method $cache_purge")) { + t.Error("OSS template should not contain cache purge map blocks") + } + + if bytes.Contains(got, []byte("proxy_cache_purge")) { + 
t.Error("OSS template should not contain proxy_cache_purge directive") + } + + // Check server-level cache configuration + expectedServerCacheDirectives := []string{ + "proxy_cache test_cache_basic_cache;", + "proxy_cache_key $scheme$proxy_host$request_uri;", + "proxy_ignore_headers Cache-Control Expires Set-Cookie Vary X-Accel-Expires;", + "proxy_cache_valid any 1h;", + "proxy_cache_methods GET HEAD;", + } + + for _, directive := range expectedServerCacheDirectives { + if !bytes.Contains(got, []byte(directive)) { + t.Errorf("Expected server cache directive: %s", directive) + } + } + + // Check location-level cache configuration + expectedLocationCacheDirectives := []string{ + "proxy_cache test_cache_location_simple_cache;", + "proxy_cache_valid 200 30m;", + "proxy_cache_valid 404 30m;", + } + + for _, directive := range expectedLocationCacheDirectives { + if !bytes.Contains(got, []byte(directive)) { + t.Errorf("Expected location cache directive: %s", directive) + } + } + + snaps.MatchSnapshot(t, string(got)) + t.Log(string(got)) +} + func vsConfig() VirtualServerConfig { return VirtualServerConfig{ LimitReqZones: []LimitReqZone{ @@ -2639,6 +2758,126 @@ var ( }, } + virtualServerCfgWithCachePolicyNGINXPlus = VirtualServerConfig{ + CacheZones: []CacheZone{ + { + Name: "test_cache_full_advanced", + Size: "50m", + Path: "/var/cache/nginx/test_cache_full_advanced", + Levels: "2:2", + }, + { + Name: "test_cache_location_location_cache", + Size: "20m", + Path: "/var/cache/nginx/test_cache_location_location_cache", + Levels: "", + }, + }, + Upstreams: []Upstream{ + { + Name: "test-upstream", + Servers: []UpstreamServer{ + { + Address: "10.0.0.20:8001", + }, + }, + }, + }, + Server: Server{ + ServerName: "example.com", + StatusZone: "example.com", + ServerTokens: "off", + // Server-level cache policy with all advanced options (NGINX Plus) + Cache: &Cache{ + ZoneName: "test_cache_full_advanced", + ZoneSize: "50m", + Time: "2h", + Valid: map[string]string{"200": "2h", "404": "2h", "301": "2h"}, + AllowedMethods: []string{"GET", "HEAD", "POST"}, + CachePurgeAllow: []string{"127.0.0.1", "10.0.0.0/8", "192.168.1.0/24"}, + OverrideUpstreamCache: true, + Levels: "2:2", + }, + Locations: []Location{ + { + Path: "/", + ProxyPass: "http://test-upstream", + // Location-level cache policy with basic options + Cache: &Cache{ + ZoneName: "test_cache_location_location_cache", + ZoneSize: "20m", + Time: "1h", + Valid: map[string]string{"any": "1h"}, + AllowedMethods: []string{"GET", "HEAD"}, + CachePurgeAllow: nil, + OverrideUpstreamCache: false, + Levels: "", + }, + }, + }, + }, + } + + virtualServerCfgWithCachePolicyOSS = VirtualServerConfig{ + CacheZones: []CacheZone{ + { + Name: "test_cache_basic_cache", + Size: "10m", + Path: "/var/cache/nginx/test_cache_basic_cache", + Levels: "1:2", + }, + { + Name: "test_cache_location_simple_cache", + Size: "5m", + Path: "/var/cache/nginx/test_cache_location_simple_cache", + Levels: "", + }, + }, + Upstreams: []Upstream{ + { + Name: "test-upstream", + Servers: []UpstreamServer{ + { + Address: "10.0.0.20:8001", + }, + }, + }, + }, + Server: Server{ + ServerName: "example.com", + StatusZone: "example.com", + ServerTokens: "off", + // Server-level cache policy with basic options (OSS) + Cache: &Cache{ + ZoneName: "test_cache_basic_cache", + ZoneSize: "10m", + Time: "1h", + Valid: map[string]string{"any": "1h"}, + AllowedMethods: []string{"GET", "HEAD"}, + CachePurgeAllow: []string{"127.0.0.1"}, // This should be ignored for OSS + OverrideUpstreamCache: true, + Levels: 
"1:2", + }, + Locations: []Location{ + { + Path: "/", + ProxyPass: "http://test-upstream", + // Location-level cache policy with specific status codes + Cache: &Cache{ + ZoneName: "test_cache_location_simple_cache", + ZoneSize: "5m", + Time: "30m", + Valid: map[string]string{"200": "30m", "404": "30m"}, + AllowedMethods: nil, + CachePurgeAllow: nil, + OverrideUpstreamCache: false, + Levels: "", + }, + }, + }, + }, + } + transportServerCfg = TransportServerConfig{ Upstreams: []StreamUpstream{ { diff --git a/internal/configs/virtualserver.go b/internal/configs/virtualserver.go index 9cc56a1942..aef8198fc9 100644 --- a/internal/configs/virtualserver.go +++ b/internal/configs/virtualserver.go @@ -22,6 +22,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" ) const ( @@ -467,10 +468,14 @@ func (vsc *virtualServerConfigurator) GenerateVirtualServerConfig( var healthChecks []version2.HealthCheck var limitReqZones []version2.LimitReqZone var authJWTClaimSets []version2.AuthJWTClaimSet + var cacheZones []version2.CacheZone limitReqZones = append(limitReqZones, policiesCfg.RateLimit.Zones...) authJWTClaimSets = append(authJWTClaimSets, policiesCfg.RateLimit.AuthJWTClaimSets...) + // Add cache zone from global policy if present + addCacheZone(&cacheZones, policiesCfg.Cache) + // generate upstreams for VirtualServer for _, u := range vsEx.VirtualServer.Spec.Upstreams { upstreams, healthChecks, statusMatches = generateUpstreams( @@ -602,6 +607,9 @@ func (vsc *virtualServerConfigurator) GenerateVirtualServerConfig( authJWTClaimSets = append(authJWTClaimSets, routePoliciesCfg.RateLimit.AuthJWTClaimSets...) + // Add cache zone from route policy if present + addCacheZone(&cacheZones, routePoliciesCfg.Cache) + dosRouteCfg := generateDosCfg(dosResources[r.Path]) if len(r.Matches) > 0 { @@ -751,6 +759,9 @@ func (vsc *virtualServerConfigurator) GenerateVirtualServerConfig( authJWTClaimSets = append(authJWTClaimSets, routePoliciesCfg.RateLimit.AuthJWTClaimSets...) + // Add cache zone from subroute policy if present + addCacheZone(&cacheZones, routePoliciesCfg.Cache) + dosRouteCfg := generateDosCfg(dosResources[r.Path]) if len(r.Matches) > 0 { @@ -838,6 +849,7 @@ func (vsc *virtualServerConfigurator) GenerateVirtualServerConfig( StatusMatches: statusMatches, LimitReqZones: removeDuplicateLimitReqZones(limitReqZones), AuthJWTClaimSets: removeDuplicateAuthJWTClaimSets(authJWTClaimSets), + CacheZones: cacheZones, HTTPSnippets: httpSnippets, Server: version2.Server{ ServerName: vsEx.VirtualServer.Spec.Host, @@ -879,6 +891,7 @@ func (vsc *virtualServerConfigurator) GenerateVirtualServerConfig( OIDC: vsc.oidcPolCfg.oidc, WAF: policiesCfg.WAF, Dos: dosCfg, + Cache: policiesCfg.Cache, PoliciesErrorReturn: policiesCfg.ErrorReturn, VSNamespace: vsEx.VirtualServer.Namespace, VSName: vsEx.VirtualServer.Name, @@ -973,6 +986,7 @@ type policiesCfg struct { OIDC bool APIKey apiKeyAuth WAF *version2.WAF + Cache *version2.Cache ErrorReturn *version2.Return BundleValidator bundleValidator } @@ -1732,6 +1746,13 @@ func (vsc *virtualServerConfigurator) generatePolicies( ownerDetails.vsName, policyOpts.secretRefs) case pol.Spec.WAF != nil: res = config.addWAFConfig(vsc.cfgParams.Context, pol.Spec.WAF, key, polNamespace, policyOpts.apResources) + case pol.Spec.Cache != nil: + res = newValidationResults() + if config.Cache != nil { + res.addWarningf("Multiple cache policies in the same context is not valid. 
Cache policy %s will be ignored", key) + } else { + config.Cache = generateCacheConfig(pol.Spec.Cache, ownerDetails.vsNamespace, ownerDetails.vsName, ownerDetails.ownerNamespace, ownerDetails.ownerName) + } default: res = newValidationResults() } @@ -1891,6 +1912,72 @@ func generateLimitReqOptions(rateLimitPol *conf_v1.RateLimit) version2.LimitReqO } } +func generateCacheConfig(cache *conf_v1.Cache, vsNamespace, vsName, ownerNamespace, ownerName string) *version2.Cache { + // Create unique zone name including VS namespace/name and owner namespace/name for policy reuse + // This ensures that the same cache policy can be safely reused across different VS/VSR + var uniqueZoneName string + if vsNamespace == ownerNamespace && vsName == ownerName { + // Policy is applied directly to VirtualServer, use VS namespace/name only + uniqueZoneName = fmt.Sprintf("%s_%s_%s", vsNamespace, vsName, cache.CacheZoneName) + } else { + // Policy is applied to VirtualServerRoute, include both VS and owner info + uniqueZoneName = fmt.Sprintf("%s_%s_%s_%s_%s", vsNamespace, vsName, ownerNamespace, ownerName, cache.CacheZoneName) + } + + cacheConfig := &version2.Cache{ + ZoneName: uniqueZoneName, + Time: cache.Time, + Valid: make(map[string]string), + AllowedMethods: cache.AllowedMethods, + CachePurgeAllow: cache.CachePurgeAllow, + ZoneSize: cache.CacheZoneSize, + OverrideUpstreamCache: cache.OverrideUpstreamCache, + Levels: cache.Levels, // Pass Levels from Cache to CacheZone + } + + // Convert allowed codes to proxy_cache_valid entries + for _, code := range cache.AllowedCodes { + if cache.Time != "" { + if code.Type == intstr.String { + // Handle the "any" string case + cacheConfig.Valid[code.StrVal] = cache.Time + } else { + // Handle integer status codes + cacheConfig.Valid[fmt.Sprintf("%d", code.IntVal)] = cache.Time + } + } + } + + return cacheConfig +} + +func addCacheZone(cacheZones *[]version2.CacheZone, cache *version2.Cache) { + if cache == nil { + return + } + + zoneSize := "10m" // default + if cache.ZoneSize != "" { + zoneSize = cache.ZoneSize + } + + cacheZone := version2.CacheZone{ + Name: cache.ZoneName, + Size: zoneSize, + Path: fmt.Sprintf("/var/cache/nginx/%s", cache.ZoneName), + Levels: cache.Levels, // Pass Levels from Cache to CacheZone + } + + // Check for duplicates + for _, existing := range *cacheZones { + if existing.Name == cacheZone.Name { + return // Already exists, don't add duplicate + } + } + + *cacheZones = append(*cacheZones, cacheZone) +} + func removeDuplicateLimitReqZones(rlz []version2.LimitReqZone) []version2.LimitReqZone { encountered := make(map[string]bool) result := []version2.LimitReqZone{} @@ -1975,6 +2062,7 @@ func addPoliciesCfgToLocation(cfg policiesCfg, location *version2.Location) { location.OIDC = cfg.OIDC location.WAF = cfg.WAF location.APIKey = cfg.APIKey.Key + location.Cache = cfg.Cache location.PoliciesErrorReturn = cfg.ErrorReturn } diff --git a/internal/configs/virtualserver_test.go b/internal/configs/virtualserver_test.go index 106e4162e2..0cdc576d04 100644 --- a/internal/configs/virtualserver_test.go +++ b/internal/configs/virtualserver_test.go @@ -22,6 +22,7 @@ import ( api_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" ) func createPointerFromBool(b bool) *bool { @@ -11327,13 +11328,559 @@ func TestGenerateVirtualServerConfigWithRateLimitGroupsWarning(t *testing.T) { } } +func TestGenerateVirtualServerConfigCache(t *testing.T) { + t.Parallel() + + tests := 
[]struct { + msg string + virtualServerEx VirtualServerEx + expected version2.VirtualServerConfig + }{ + { + msg: "cache policy at vs spec level", + virtualServerEx: VirtualServerEx{ + VirtualServer: &conf_v1.VirtualServer{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "cafe", + Namespace: "default", + }, + Spec: conf_v1.VirtualServerSpec{ + Host: "cafe.example.com", + Policies: []conf_v1.PolicyReference{ + { + Name: "cache-policy", + }, + }, + Upstreams: []conf_v1.Upstream{ + { + Name: "tea", + Service: "tea-svc", + Port: 80, + }, + { + Name: "coffee", + Service: "coffee-svc", + Port: 80, + }, + }, + Routes: []conf_v1.Route{ + { + Path: "/tea", + Action: &conf_v1.Action{ + Pass: "tea", + }, + }, + { + Path: "/coffee", + Action: &conf_v1.Action{ + Pass: "coffee", + }, + }, + }, + }, + }, + Policies: map[string]*conf_v1.Policy{ + "default/cache-policy": { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "cache-policy", + Namespace: "default", + }, + Spec: conf_v1.PolicySpec{ + Cache: &conf_v1.Cache{ + CacheZoneName: "my-cache", + CacheZoneSize: "10m", + Time: "1h", + }, + }, + }, + }, + Endpoints: map[string][]string{ + "default/tea-svc:80": { + "10.0.0.20:80", + }, + "default/coffee-svc:80": { + "10.0.0.30:80", + }, + }, + }, + expected: version2.VirtualServerConfig{ + Upstreams: []version2.Upstream{ + { + UpstreamLabels: version2.UpstreamLabels{ + Service: "coffee-svc", + ResourceType: "virtualserver", + ResourceName: "cafe", + ResourceNamespace: "default", + }, + Name: "vs_default_cafe_coffee", + Servers: []version2.UpstreamServer{ + { + Address: "10.0.0.30:80", + }, + }, + }, + { + UpstreamLabels: version2.UpstreamLabels{ + Service: "tea-svc", + ResourceType: "virtualserver", + ResourceName: "cafe", + ResourceNamespace: "default", + }, + Name: "vs_default_cafe_tea", + Servers: []version2.UpstreamServer{ + { + Address: "10.0.0.20:80", + }, + }, + }, + }, + HTTPSnippets: []string{}, + LimitReqZones: []version2.LimitReqZone{}, + CacheZones: []version2.CacheZone{ + { + Name: "default_cafe_my-cache", + Size: "10m", + Path: "/var/cache/nginx/default_cafe_my-cache", + Levels: "", + }, + }, + Server: version2.Server{ + ServerName: "cafe.example.com", + StatusZone: "cafe.example.com", + ServerTokens: "off", + VSNamespace: "default", + VSName: "cafe", + Cache: &version2.Cache{ + ZoneName: "default_cafe_my-cache", + ZoneSize: "10m", + Time: "1h", + Valid: map[string]string{}, + AllowedMethods: nil, + CachePurgeAllow: nil, + OverrideUpstreamCache: false, + Levels: "", + }, + Locations: []version2.Location{ + { + Path: "/tea", + ProxyPass: "http://vs_default_cafe_tea", + ProxyNextUpstream: "error timeout", + ProxyNextUpstreamTimeout: "0s", + ProxyNextUpstreamTries: 0, + ProxySSLName: "tea-svc.default.svc", + ProxyPassRequestHeaders: true, + ProxySetHeaders: []version2.Header{{Name: "Host", Value: "$host"}}, + ServiceName: "tea-svc", + }, + { + Path: "/coffee", + ProxyPass: "http://vs_default_cafe_coffee", + ProxyNextUpstream: "error timeout", + ProxyNextUpstreamTimeout: "0s", + ProxyNextUpstreamTries: 0, + ProxySSLName: "coffee-svc.default.svc", + ProxyPassRequestHeaders: true, + ProxySetHeaders: []version2.Header{{Name: "Host", Value: "$host"}}, + ServiceName: "coffee-svc", + }, + }, + }, + }, + }, + { + msg: "cache policy at route level", + virtualServerEx: VirtualServerEx{ + VirtualServer: &conf_v1.VirtualServer{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "cafe", + Namespace: "default", + }, + Spec: conf_v1.VirtualServerSpec{ + Host: "cafe.example.com", + Upstreams: []conf_v1.Upstream{ + { + Name: "tea", + 
Service: "tea-svc", + Port: 80, + }, + { + Name: "coffee", + Service: "coffee-svc", + Port: 80, + }, + }, + Routes: []conf_v1.Route{ + { + Path: "/tea", + Policies: []conf_v1.PolicyReference{ + { + Name: "route-cache-policy", + }, + }, + Action: &conf_v1.Action{ + Pass: "tea", + }, + }, + { + Path: "/coffee", + Action: &conf_v1.Action{ + Pass: "coffee", + }, + }, + }, + }, + }, + Policies: map[string]*conf_v1.Policy{ + "default/route-cache-policy": { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "route-cache-policy", + Namespace: "default", + }, + Spec: conf_v1.PolicySpec{ + Cache: &conf_v1.Cache{ + CacheZoneName: "route-cache", + CacheZoneSize: "5m", + Time: "30m", + AllowedCodes: []intstr.IntOrString{ + intstr.FromInt(200), + intstr.FromInt(404), + }, + }, + }, + }, + }, + Endpoints: map[string][]string{ + "default/tea-svc:80": { + "10.0.0.20:80", + }, + "default/coffee-svc:80": { + "10.0.0.30:80", + }, + }, + }, + expected: version2.VirtualServerConfig{ + Upstreams: []version2.Upstream{ + { + UpstreamLabels: version2.UpstreamLabels{ + Service: "coffee-svc", + ResourceType: "virtualserver", + ResourceName: "cafe", + ResourceNamespace: "default", + }, + Name: "vs_default_cafe_coffee", + Servers: []version2.UpstreamServer{ + { + Address: "10.0.0.30:80", + }, + }, + }, + { + UpstreamLabels: version2.UpstreamLabels{ + Service: "tea-svc", + ResourceType: "virtualserver", + ResourceName: "cafe", + ResourceNamespace: "default", + }, + Name: "vs_default_cafe_tea", + Servers: []version2.UpstreamServer{ + { + Address: "10.0.0.20:80", + }, + }, + }, + }, + HTTPSnippets: []string{}, + LimitReqZones: []version2.LimitReqZone{}, + CacheZones: []version2.CacheZone{ + { + Name: "default_cafe_route-cache", + Size: "5m", + Path: "/var/cache/nginx/default_cafe_route-cache", + Levels: "", + }, + }, + Server: version2.Server{ + ServerName: "cafe.example.com", + StatusZone: "cafe.example.com", + ServerTokens: "off", + VSNamespace: "default", + VSName: "cafe", + Locations: []version2.Location{ + { + Path: "/tea", + ProxyPass: "http://vs_default_cafe_tea", + ProxyNextUpstream: "error timeout", + ProxyNextUpstreamTimeout: "0s", + ProxyNextUpstreamTries: 0, + ProxySSLName: "tea-svc.default.svc", + ProxyPassRequestHeaders: true, + ProxySetHeaders: []version2.Header{{Name: "Host", Value: "$host"}}, + ServiceName: "tea-svc", + Cache: &version2.Cache{ + ZoneName: "default_cafe_route-cache", + ZoneSize: "5m", + Time: "30m", + Valid: map[string]string{"200": "30m", "404": "30m"}, + AllowedMethods: nil, + CachePurgeAllow: nil, + OverrideUpstreamCache: false, + Levels: "", + }, + }, + { + Path: "/coffee", + ProxyPass: "http://vs_default_cafe_coffee", + ProxyNextUpstream: "error timeout", + ProxyNextUpstreamTimeout: "0s", + ProxyNextUpstreamTries: 0, + ProxySSLName: "coffee-svc.default.svc", + ProxyPassRequestHeaders: true, + ProxySetHeaders: []version2.Header{{Name: "Host", Value: "$host"}}, + ServiceName: "coffee-svc", + }, + }, + }, + }, + }, + { + msg: "cache policy at VSR subroute level", + virtualServerEx: VirtualServerEx{ + VirtualServer: &conf_v1.VirtualServer{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "cafe", + Namespace: "default", + }, + Spec: conf_v1.VirtualServerSpec{ + Host: "cafe.example.com", + Upstreams: []conf_v1.Upstream{ + { + Name: "tea", + Service: "tea-svc", + Port: 80, + }, + }, + Routes: []conf_v1.Route{ + { + Path: "/tea", + Route: "default/tea-vsr", + }, + }, + }, + }, + VirtualServerRoutes: []*conf_v1.VirtualServerRoute{ + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "tea-vsr", + Namespace: 
"default", + }, + Spec: conf_v1.VirtualServerRouteSpec{ + Host: "cafe.example.com", + Upstreams: []conf_v1.Upstream{ + { + Name: "tea-v1", + Service: "tea-v1-svc", + Port: 80, + }, + { + Name: "tea-v2", + Service: "tea-v2-svc", + Port: 80, + }, + }, + Subroutes: []conf_v1.Route{ + { + Path: "/tea/v1", + Policies: []conf_v1.PolicyReference{ + { + Name: "vsr-cache-policy", + }, + }, + Action: &conf_v1.Action{ + Pass: "tea-v1", + }, + }, + { + Path: "/tea/v2", + Action: &conf_v1.Action{ + Pass: "tea-v2", + }, + }, + }, + }, + }, + }, + Policies: map[string]*conf_v1.Policy{ + "default/vsr-cache-policy": { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "vsr-cache-policy", + Namespace: "default", + }, + Spec: conf_v1.PolicySpec{ + Cache: &conf_v1.Cache{ + CacheZoneName: "vsr-cache", + CacheZoneSize: "20m", + Time: "2h", + OverrideUpstreamCache: true, + CachePurgeAllow: []string{"127.0.0.1"}, + }, + }, + }, + }, + Endpoints: map[string][]string{ + "default/tea-svc:80": { + "10.0.0.20:80", + }, + "default/tea-v1-svc:80": { + "10.0.0.21:80", + }, + "default/tea-v2-svc:80": { + "10.0.0.22:80", + }, + }, + }, + expected: version2.VirtualServerConfig{ + Upstreams: []version2.Upstream{ + { + UpstreamLabels: version2.UpstreamLabels{ + Service: "tea-svc", + ResourceType: "virtualserver", + ResourceName: "cafe", + ResourceNamespace: "default", + }, + Name: "vs_default_cafe_tea", + Servers: []version2.UpstreamServer{ + { + Address: "10.0.0.20:80", + }, + }, + }, + { + UpstreamLabels: version2.UpstreamLabels{ + Service: "tea-v1-svc", + ResourceType: "virtualserverroute", + ResourceName: "tea-vsr", + ResourceNamespace: "default", + }, + Name: "vs_default_cafe_vsr_default_tea-vsr_tea-v1", + Servers: []version2.UpstreamServer{ + { + Address: "10.0.0.21:80", + }, + }, + }, + { + UpstreamLabels: version2.UpstreamLabels{ + Service: "tea-v2-svc", + ResourceType: "virtualserverroute", + ResourceName: "tea-vsr", + ResourceNamespace: "default", + }, + Name: "vs_default_cafe_vsr_default_tea-vsr_tea-v2", + Servers: []version2.UpstreamServer{ + { + Address: "10.0.0.22:80", + }, + }, + }, + }, + HTTPSnippets: []string{}, + LimitReqZones: []version2.LimitReqZone{}, + CacheZones: []version2.CacheZone{ + { + Name: "default_cafe_default_tea-vsr_vsr-cache", + Size: "20m", + Path: "/var/cache/nginx/default_cafe_default_tea-vsr_vsr-cache", + Levels: "", + }, + }, + Server: version2.Server{ + ServerName: "cafe.example.com", + StatusZone: "cafe.example.com", + ServerTokens: "off", + VSNamespace: "default", + VSName: "cafe", + Locations: []version2.Location{ + { + Path: "/tea/v1", + ProxyPass: "http://vs_default_cafe_vsr_default_tea-vsr_tea-v1", + ProxyNextUpstream: "error timeout", + ProxyNextUpstreamTimeout: "0s", + ProxyNextUpstreamTries: 0, + ProxySSLName: "tea-v1-svc.default.svc", + ProxyPassRequestHeaders: true, + ProxySetHeaders: []version2.Header{{Name: "Host", Value: "$host"}}, + ServiceName: "tea-v1-svc", + IsVSR: true, + VSRName: "tea-vsr", + VSRNamespace: "default", + Cache: &version2.Cache{ + ZoneName: "default_cafe_default_tea-vsr_vsr-cache", + ZoneSize: "20m", + Time: "2h", + Valid: map[string]string{}, + AllowedMethods: nil, + CachePurgeAllow: []string{"127.0.0.1"}, + OverrideUpstreamCache: true, + Levels: "", + }, + }, + { + Path: "/tea/v2", + ProxyPass: "http://vs_default_cafe_vsr_default_tea-vsr_tea-v2", + ProxyNextUpstream: "error timeout", + ProxyNextUpstreamTimeout: "0s", + ProxyNextUpstreamTries: 0, + ProxySSLName: "tea-v2-svc.default.svc", + ProxyPassRequestHeaders: true, + ProxySetHeaders: 
[]version2.Header{{Name: "Host", Value: "$host"}}, + ServiceName: "tea-v2-svc", + IsVSR: true, + VSRName: "tea-vsr", + VSRNamespace: "default", + }, + }, + }, + }, + }, + } + + baseCfgParams := ConfigParams{ + Context: context.Background(), + ServerTokens: "off", + } + + vsc := newVirtualServerConfigurator( + &baseCfgParams, + false, + false, + &StaticConfigParams{}, + false, + &fakeBV, + ) + + for _, test := range tests { + result, warnings := vsc.GenerateVirtualServerConfig(&test.virtualServerEx, nil, nil) + + if diff := cmp.Diff(test.expected, result); diff != "" { + t.Errorf("GenerateVirtualServerConfig() mismatch (-want +got):\n%s", diff) + t.Error(test.msg) + } + + if len(warnings) != 0 { + t.Errorf("GenerateVirtualServerConfig returned warnings: %v", warnings) + } + } +} + func TestGeneratePolicies(t *testing.T) { t.Parallel() + ctx := context.Background() ownerDetails := policyOwnerDetails{ owner: nil, // nil is OK for the unit test ownerNamespace: "default", vsNamespace: "default", vsName: "test", + ownerName: "test", } mTLSCertPath := "/etc/nginx/secrets/default-ingress-mtls-secret-ca.crt" mTLSCrlPath := "/etc/nginx/secrets/default-ingress-mtls-secret-ca.crl" @@ -12281,6 +12828,219 @@ func TestGeneratePolicies(t *testing.T) { }, msg: "WAF reference", }, + { + policyRefs: []conf_v1.PolicyReference{ + { + Name: "cache-policy-basic", + Namespace: "default", + }, + }, + policies: map[string]*conf_v1.Policy{ + "default/cache-policy-basic": { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "cache-policy-basic", + Namespace: "default", + }, + Spec: conf_v1.PolicySpec{ + Cache: &conf_v1.Cache{ + CacheZoneName: "basic-cache", + CacheZoneSize: "10m", + }, + }, + }, + }, + expected: policiesCfg{ + Context: ctx, + Cache: &version2.Cache{ + ZoneName: "default_test_basic-cache", + ZoneSize: "10m", + Valid: map[string]string{}, + }, + }, + msg: "basic cache policy reference", + }, + { + policyRefs: []conf_v1.PolicyReference{ + { + Name: "cache-policy-full", + Namespace: "default", + }, + }, + policies: map[string]*conf_v1.Policy{ + "default/cache-policy-full": { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "cache-policy-full", + Namespace: "default", + }, + Spec: conf_v1.PolicySpec{ + Cache: &conf_v1.Cache{ + CacheZoneName: "full-cache", + CacheZoneSize: "100m", + AllowedCodes: []intstr.IntOrString{intstr.FromString("any")}, + AllowedMethods: []string{"GET", "HEAD", "POST"}, + Time: "1h", + OverrideUpstreamCache: true, + Levels: "1:2", + }, + }, + }, + }, + expected: policiesCfg{ + Context: ctx, + Cache: &version2.Cache{ + ZoneName: "default_test_full-cache", + ZoneSize: "100m", + Time: "1h", + Valid: map[string]string{"any": "1h"}, + AllowedMethods: []string{"GET", "HEAD", "POST"}, + OverrideUpstreamCache: true, + Levels: "1:2", + }, + }, + msg: "full cache policy with all options", + }, + { + policyRefs: []conf_v1.PolicyReference{ + { + Name: "cache-policy-status-codes", + Namespace: "default", + }, + }, + policies: map[string]*conf_v1.Policy{ + "default/cache-policy-status-codes": { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "cache-policy-status-codes", + Namespace: "default", + }, + Spec: conf_v1.PolicySpec{ + Cache: &conf_v1.Cache{ + CacheZoneName: "status-cache", + CacheZoneSize: "50m", + AllowedCodes: []intstr.IntOrString{ + intstr.FromInt(200), + intstr.FromInt(301), + intstr.FromInt(404), + }, + Time: "30m", + }, + }, + }, + }, + expected: policiesCfg{ + Context: ctx, + Cache: &version2.Cache{ + ZoneName: "default_test_status-cache", + ZoneSize: "50m", + Time: "30m", + Valid: map[string]string{ 
+ "200": "30m", + "301": "30m", + "404": "30m", + }, + }, + }, + msg: "cache policy with specific status codes", + }, + { + policyRefs: []conf_v1.PolicyReference{ + { + Name: "cache-policy-methods", + Namespace: "default", + }, + }, + policies: map[string]*conf_v1.Policy{ + "default/cache-policy-methods": { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "cache-policy-methods", + Namespace: "default", + }, + Spec: conf_v1.PolicySpec{ + Cache: &conf_v1.Cache{ + CacheZoneName: "methods-cache", + CacheZoneSize: "25m", + AllowedMethods: []string{"GET", "HEAD"}, + Levels: "2:2", + }, + }, + }, + }, + expected: policiesCfg{ + Context: ctx, + Cache: &version2.Cache{ + ZoneName: "default_test_methods-cache", + ZoneSize: "25m", + Valid: map[string]string{}, + AllowedMethods: []string{"GET", "HEAD"}, + Levels: "2:2", + }, + }, + msg: "cache policy with allowed methods and levels", + }, + { + policyRefs: []conf_v1.PolicyReference{ + { + Name: "cache-policy-purge", + Namespace: "default", + }, + }, + policies: map[string]*conf_v1.Policy{ + "default/cache-policy-purge": { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "cache-policy-purge", + Namespace: "default", + }, + Spec: conf_v1.PolicySpec{ + Cache: &conf_v1.Cache{ + CacheZoneName: "purge-cache", + CacheZoneSize: "75m", + CachePurgeAllow: []string{"192.168.1.0/24", "10.0.0.1"}, + }, + }, + }, + }, + expected: policiesCfg{ + Context: ctx, + Cache: &version2.Cache{ + ZoneName: "default_test_purge-cache", + ZoneSize: "75m", + Valid: map[string]string{}, + CachePurgeAllow: []string{"192.168.1.0/24", "10.0.0.1"}, + }, + }, + msg: "cache policy with purge allow IPs", + }, + { + policyRefs: []conf_v1.PolicyReference{ + { + Name: "cache-policy-implicit", + }, + }, + policies: map[string]*conf_v1.Policy{ + "default/cache-policy-implicit": { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "cache-policy-implicit", + Namespace: "default", + }, + Spec: conf_v1.PolicySpec{ + Cache: &conf_v1.Cache{ + CacheZoneName: "implicit-cache", + CacheZoneSize: "15m", + Time: "45m", + }, + }, + }, + }, + expected: policiesCfg{ + Context: ctx, + Cache: &version2.Cache{ + ZoneName: "default_test_implicit-cache", + ZoneSize: "15m", + Time: "45m", + Valid: map[string]string{}, + }, + }, + msg: "implicit cache policy reference", + }, } vsc := newVirtualServerConfigurator(&ConfigParams{Context: ctx}, false, false, &StaticConfigParams{}, false, &fakeBV) diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index e23443bd2d..93b8eacdc2 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -2109,7 +2109,7 @@ func TestGetPoliciesGlobalWatch(t *testing.T) { expectedPolicies := []*conf_v1.Policy{validPolicy} expectedErrors := []error{ - errors.New("policy default/invalid-policy is invalid: spec: Invalid value: \"\": must specify exactly one of: `accessControl`, `rateLimit`, `ingressMTLS`, `egressMTLS`, `basicAuth`, `apiKey`, `jwt`, `oidc`, `waf`"), + errors.New("policy default/invalid-policy is invalid: spec: Invalid value: \"\": must specify exactly one of: `accessControl`, `rateLimit`, `ingressMTLS`, `egressMTLS`, `basicAuth`, `apiKey`, `cache`, `jwt`, `oidc`, `waf`"), errors.New("policy nginx-ingress/valid-policy doesn't exist"), errors.New("failed to get policy nginx-ingress/some-policy: GetByKey error"), errors.New("referenced policy default/valid-policy-ingress-class has incorrect ingress class: test-class (controller ingress class: )"), @@ -2207,7 +2207,7 @@ func TestGetPoliciesNamespacedWatch(t *testing.T) { expectedPolicies := 
[]*conf_v1.Policy{validPolicy} expectedErrors := []error{ - errors.New("policy default/invalid-policy is invalid: spec: Invalid value: \"\": must specify exactly one of: `accessControl`, `rateLimit`, `ingressMTLS`, `egressMTLS`, `basicAuth`, `apiKey`, `jwt`, `oidc`, `waf`"), + errors.New("policy default/invalid-policy is invalid: spec: Invalid value: \"\": must specify exactly one of: `accessControl`, `rateLimit`, `ingressMTLS`, `egressMTLS`, `basicAuth`, `apiKey`, `cache`, `jwt`, `oidc`, `waf`"), errors.New("failed to get namespace nginx-ingress"), errors.New("referenced policy default/valid-policy-ingress-class has incorrect ingress class: test-class (controller ingress class: )"), } diff --git a/pkg/apis/configuration/v1/types.go b/pkg/apis/configuration/v1/types.go index df32e367dc..ca43fc4db8 100644 --- a/pkg/apis/configuration/v1/types.go +++ b/pkg/apis/configuration/v1/types.go @@ -2,6 +2,7 @@ package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) const ( @@ -788,6 +789,8 @@ type PolicySpec struct { WAF *WAF `json:"waf"` // The API Key policy configures NGINX to authorize requests which provide a valid API Key in a specified header or query param. APIKey *APIKey `json:"apiKey"` + // The Cache Key defines a cache policy for proxy caching + Cache *Cache `json:"cache"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -1003,3 +1006,65 @@ type SuppliedIn struct { // The location of the API Key as a query param. For example, $arg_apikey. Accepted variables are $arg_. Query []string `json:"query"` } + +// Cache defines a cache policy for proxy caching. +// +kubebuilder:validation:XValidation:rule="!has(self.allowedCodes) || (has(self.allowedCodes) && has(self.time))",message="time is required when allowedCodes is specified" +type Cache struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^[a-z][a-zA-Z0-9_]*[a-zA-Z0-9]$|^[a-z]$` + // CacheZoneName defines the name of the cache zone. Must start with a lowercase letter, + // followed by alphanumeric characters or underscores, and end with an alphanumeric character. + // Single lowercase letters are also allowed. Examples: "cache", "my_cache", "cache1". + CacheZoneName string `json:"cacheZoneName"` + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^[0-9]+[kmg]$` + // CacheZoneSize defines the size of the cache zone. Must be a number followed by a size unit: + // 'k' for kilobytes, 'm' for megabytes, or 'g' for gigabytes. + // Examples: "10m", "1g", "512k". + CacheZoneSize string `json:"cacheZoneSize"` + // +kubebuilder:validation:Optional + // AllowedCodes defines which HTTP response codes should be cached. + // Accepts either: + // - The string "any" to cache all response codes (must be the only element) + // - A list of HTTP status codes as integers (100-599) + // Examples: ["any"], [200, 301, 404], [200]. + // Invalid: ["any", 200] (cannot mix "any" with specific codes). + AllowedCodes []intstr.IntOrString `json:"allowedCodes,omitempty"` + // +kubebuilder:validation:Optional + // +kubebuilder:validation:MaxItems=3 + // +kubebuilder:validation:XValidation:rule="self.all(method, method in ['GET', 'HEAD', 'POST'])",message="allowed methods must be one of: GET, HEAD, POST" + // AllowedMethods defines which HTTP methods should be cached. + // Only "GET", "HEAD", and "POST" are supported by NGINX proxy_cache_methods directive. + // GET and HEAD are always cached by default even if not specified. 
+ // Maximum of 3 items allowed. Examples: ["GET"], ["GET", "HEAD", "POST"]. + // Invalid methods: PUT, DELETE, PATCH, etc. + AllowedMethods []string `json:"allowedMethods,omitempty"` + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Pattern=`^[0-9]+[smhd]$` + // Time defines the default cache time. Required when allowedCodes is specified. + // Must be a number followed by a time unit: + // 's' for seconds, 'm' for minutes, 'h' for hours, 'd' for days. + // Examples: "30s", "5m", "1h", "2d". + Time string `json:"time,omitempty"` + // +kubebuilder:validation:Optional + // CachePurgeAllow defines IP addresses or CIDR blocks allowed to purge cache. + // This feature is only available in NGINX Plus. + // Examples: ["192.168.1.100", "10.0.0.0/8", "::1"]. + // Invalid in NGINX OSS (will be ignored). + CachePurgeAllow []string `json:"cachePurgeAllow,omitempty"` + // +kubebuilder:validation:Optional + // +kubebuilder:default=false + // OverrideUpstreamCache controls whether to override upstream cache headers + // (using proxy_ignore_headers directive). When true, NGINX will ignore + // cache-related headers from upstream servers like Cache-Control, Expires, etc. + // Default: false. + OverrideUpstreamCache bool `json:"overrideUpstreamCache,omitempty"` + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Pattern=`^[12](?::[12]){0,2}$` + // Levels defines the cache directory hierarchy levels for storing cached files. + // Must be in format "X:Y" or "X:Y:Z" where X, Y, Z are either 1 or 2. + // This controls the number of subdirectory levels and their name lengths. + // Examples: "1:2", "2:2", "1:2:2". + // Invalid: "3:1", "1:3", "1:2:3". + Levels string `json:"levels,omitempty"` +} diff --git a/pkg/apis/configuration/v1/zz_generated.deepcopy.go b/pkg/apis/configuration/v1/zz_generated.deepcopy.go index 75049fea24..943fb76a03 100644 --- a/pkg/apis/configuration/v1/zz_generated.deepcopy.go +++ b/pkg/apis/configuration/v1/zz_generated.deepcopy.go @@ -7,6 +7,7 @@ package v1 import ( runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -183,6 +184,37 @@ func (in *BasicAuth) DeepCopy() *BasicAuth { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cache) DeepCopyInto(out *Cache) { + *out = *in + if in.AllowedCodes != nil { + in, out := &in.AllowedCodes, &out.AllowedCodes + *out = make([]intstr.IntOrString, len(*in)) + copy(*out, *in) + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.CachePurgeAllow != nil { + in, out := &in.CachePurgeAllow, &out.CachePurgeAllow + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cache. +func (in *Cache) DeepCopy() *Cache { + if in == nil { + return nil + } + out := new(Cache) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CertManager) DeepCopyInto(out *CertManager) { *out = *in @@ -731,6 +763,11 @@ func (in *PolicySpec) DeepCopyInto(out *PolicySpec) { *out = new(APIKey) (*in).DeepCopyInto(*out) } + if in.Cache != nil { + in, out := &in.Cache, &out.Cache + *out = new(Cache) + (*in).DeepCopyInto(*out) + } return } diff --git a/pkg/apis/configuration/validation/policy.go b/pkg/apis/configuration/validation/policy.go index 96866c4821..73e8ec76e2 100644 --- a/pkg/apis/configuration/validation/policy.go +++ b/pkg/apis/configuration/validation/policy.go @@ -91,8 +91,13 @@ func validatePolicySpec(spec *v1.PolicySpec, fieldPath *field.Path, isPlus, enab fieldCount++ } + if spec.Cache != nil { + allErrs = append(allErrs, validateCache(spec.Cache, fieldPath.Child("cache"), isPlus)...) + fieldCount++ + } + if fieldCount != 1 { - msg := "must specify exactly one of: `accessControl`, `rateLimit`, `ingressMTLS`, `egressMTLS`, `basicAuth`, `apiKey`" + msg := "must specify exactly one of: `accessControl`, `rateLimit`, `ingressMTLS`, `egressMTLS`, `basicAuth`, `apiKey`, `cache`" if isPlus { msg = fmt.Sprint(msg, ", `jwt`, `oidc`, `waf`") } @@ -418,6 +423,86 @@ func validateLogConfs(logs []*v1.SecurityLog, fieldPath *field.Path, bundleMode return allErrs } +// validateCache validates a cache policy +func validateCache(cache *v1.Cache, fieldPath *field.Path, isPlus bool) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, validateCacheAllowedCodes(cache, fieldPath)...) + + allErrs = append(allErrs, validateCachePlusFeatures(cache, fieldPath, isPlus)...) + + return allErrs +} + +// validateCacheAllowedCodes validates the allowedCodes field +func validateCacheAllowedCodes(cache *v1.Cache, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(cache.AllowedCodes) == 0 { + return allErrs // No validation needed for empty slice + } + + // Check if it's the special case: single element "any" + if len(cache.AllowedCodes) == 1 && cache.AllowedCodes[0].Type == 1 && cache.AllowedCodes[0].StrVal == "any" { + return allErrs // Valid: single "any" string + } + + // Check if it contains "any" mixed with other codes (invalid) + hasAny := false + for i, code := range cache.AllowedCodes { + if code.Type == 1 && code.StrVal == "any" { + hasAny = true + if len(cache.AllowedCodes) > 1 { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("allowedCodes").Index(i), code.StrVal, "the string 'any' cannot be mixed with other codes")) + } + } + } + + // If we have "any" mixed with others, we already reported the error above + if hasAny { + return allErrs + } + + // Validate all elements are integers in the range 100-599 + for i, code := range cache.AllowedCodes { + if code.Type == 1 { // String type + allErrs = append(allErrs, field.Invalid(fieldPath.Child("allowedCodes").Index(i), code.StrVal, "must be an integer HTTP status code (100-599) or the single string 'any'")) + } else { // Integer type + intVal := int(code.IntVal) + if intVal < 100 || intVal > 599 { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("allowedCodes").Index(i), intVal, "HTTP status code must be between 100 and 599")) + } + } + } + + return allErrs +} + +// validateCachePlusFeatures validates NGINX Plus specific features, such as cache purge allow IPs +func validateCachePlusFeatures(cache *v1.Cache, fieldPath *field.Path, isPlus bool) field.ErrorList { + allErrs := field.ErrorList{} + + // Validate cache purge allow IPs if provided + if len(cache.CachePurgeAllow) > 0 { + // Check if NGINX Plus is 
required for cache purge + if !isPlus { + allErrs = append(allErrs, field.Forbidden(fieldPath.Child("cachePurgeAllow"), "cache purge is only supported in NGINX Plus")) + } else { + // Validate IP addresses/CIDRs if NGINX Plus is available + for i, ip := range cache.CachePurgeAllow { + if net.ParseIP(ip) == nil { + // Try parsing as CIDR + if _, _, err := net.ParseCIDR(ip); err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("cachePurgeAllow").Index(i), ip, "must be a valid IP address or CIDR")) + } + } + } + } + } + + return allErrs +} + func validateLogConf(logConf *v1.SecurityLog, fieldPath *field.Path, bundleMode bool) field.ErrorList { allErrs := field.ErrorList{} diff --git a/pkg/apis/configuration/validation/policy_test.go b/pkg/apis/configuration/validation/policy_test.go index 4dc5b2b739..24339073dc 100644 --- a/pkg/apis/configuration/validation/policy_test.go +++ b/pkg/apis/configuration/validation/policy_test.go @@ -4,6 +4,7 @@ import ( "testing" v1 "github.com/nginx/kubernetes-ingress/pkg/apis/configuration/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -2424,3 +2425,258 @@ func TestValidateWAF_FailsOnInvalidApLogBundle(t *testing.T) { }) } } + +func TestValidatePolicy_IsNotValidCachePolicy(t *testing.T) { + t.Parallel() + + tt := []struct { + name string + policy *v1.Policy + isPlus bool + }{ + { + name: "cache purge not allowed on OSS", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "purgeoss", + CacheZoneSize: "10m", + CachePurgeAllow: []string{"192.168.1.1"}, + }, + }, + }, + isPlus: false, + }, + { + name: "invalid IP address in purge allow", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "invalidip", + CacheZoneSize: "10m", + CachePurgeAllow: []string{"invalid-ip"}, + }, + }, + }, + isPlus: true, + }, + { + name: "allowedCodes with 'any' mixed with integers", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "test", + CacheZoneSize: "10m", + AllowedCodes: []intstr.IntOrString{intstr.FromString("any"), intstr.FromInt(200)}, + }, + }, + }, + isPlus: false, + }, + { + name: "allowedCodes with invalid string", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "test", + CacheZoneSize: "10m", + AllowedCodes: []intstr.IntOrString{intstr.FromString("invalid")}, + }, + }, + }, + isPlus: false, + }, + { + name: "allowedCodes with status code below 100", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "test", + CacheZoneSize: "10m", + AllowedCodes: []intstr.IntOrString{intstr.FromInt(99)}, + }, + }, + }, + isPlus: false, + }, + { + name: "allowedCodes with status code above 599", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "test", + CacheZoneSize: "10m", + AllowedCodes: []intstr.IntOrString{intstr.FromInt(600)}, + }, + }, + }, + isPlus: false, + }, + { + name: "allowedCodes with multiple 'any' strings", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "test", + CacheZoneSize: "10m", + AllowedCodes: []intstr.IntOrString{intstr.FromString("any"), intstr.FromString("any")}, + }, + }, + }, + isPlus: false, + }, + { + name: "allowedCodes with valid and invalid status codes", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "test", + CacheZoneSize: "10m", + AllowedCodes: []intstr.IntOrString{intstr.FromInt(200), intstr.FromInt(700)}, + }, + }, + }, + 
isPlus: false, + }, + } + + for _, tc := range tt { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + err := ValidatePolicy(tc.policy, tc.isPlus, false, false) + if err == nil { + t.Errorf("got no errors on invalid Cache policy spec input") + } + }) + } +} + +func TestValidatePolicy_IsValidCachePolicy(t *testing.T) { + t.Parallel() + + tt := []struct { + name string + policy *v1.Policy + isPlus bool + }{ + { + name: "basic cache policy", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "basiccache", + CacheZoneSize: "10m", + }, + }, + }, + isPlus: false, + }, + { + name: "cache policy with all options", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "fullcache", + CacheZoneSize: "100m", + AllowedCodes: []intstr.IntOrString{intstr.FromString("any")}, + AllowedMethods: []string{"GET", "HEAD", "POST"}, + Time: "2h", + OverrideUpstreamCache: true, + Levels: "1:2", + }, + }, + }, + isPlus: false, + }, + { + name: "cache policy with purge (NGINX Plus)", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "purgecache", + CacheZoneSize: "50m", + CachePurgeAllow: []string{"10.0.0.0/8", "192.168.1.100"}, + }, + }, + }, + isPlus: true, + }, + { + name: "cache policy with IPv6 purge addresses", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "ipv6cache", + CacheZoneSize: "20m", + CachePurgeAllow: []string{"2001:db8::1", "fe80::/64"}, + }, + }, + }, + isPlus: true, + }, + { + name: "cache policy with specific allowed codes", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "codecache", + CacheZoneSize: "15m", + AllowedCodes: []intstr.IntOrString{intstr.FromInt(200), intstr.FromInt(404), intstr.FromInt(500)}, + }, + }, + }, + isPlus: false, + }, + { + name: "cache policy with edge case status codes", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "edgecase", + CacheZoneSize: "5m", + AllowedCodes: []intstr.IntOrString{intstr.FromInt(100), intstr.FromInt(599)}, + }, + }, + }, + isPlus: false, + }, + { + name: "cache policy with purge and CIDR range", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "cidrpurge", + CacheZoneSize: "20m", + CachePurgeAllow: []string{"192.168.1.0/24", "10.0.0.1"}, + }, + }, + }, + isPlus: true, + }, + { + name: "cache policy with empty allowed codes", + policy: &v1.Policy{ + Spec: v1.PolicySpec{ + Cache: &v1.Cache{ + CacheZoneName: "emptycode", + CacheZoneSize: "10m", + AllowedCodes: []intstr.IntOrString{}, + }, + }, + }, + isPlus: false, + }, + } + + for _, tc := range tt { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + err := ValidatePolicy(tc.policy, tc.isPlus, false, false) + if err != nil { + t.Errorf("want no errors, got %+v\n", err) + } + }) + } +} diff --git a/pyproject.toml b/pyproject.toml index 63f3570c69..0405d7d069 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,6 +53,7 @@ markers = [ "policies_jwt", "policies_ac", "policies_mtls", + "policies_cache", "rewrite", "skip_for_nginx_oss", "skip_for_loadbalancer", diff --git a/tests/data/cache-policy/policies/cache-policy-advanced.yaml b/tests/data/cache-policy/policies/cache-policy-advanced.yaml new file mode 100644 index 0000000000..2e7da75f9e --- /dev/null +++ b/tests/data/cache-policy/policies/cache-policy-advanced.yaml @@ -0,0 +1,13 @@ +apiVersion: k8s.nginx.org/v1 +kind: Policy +metadata: + name: cache-policy-advanced +spec: + cache: + 
cacheZoneName: "advancedcache" + cacheZoneSize: "20m" + allowedCodes: [200, 404, 301] + allowedMethods: ["GET", "HEAD", "POST"] + time: "2h" + overrideUpstreamCache: true + levels: "2:2" diff --git a/tests/data/cache-policy/policies/cache-policy-basic.yaml b/tests/data/cache-policy/policies/cache-policy-basic.yaml new file mode 100644 index 0000000000..761f17e23a --- /dev/null +++ b/tests/data/cache-policy/policies/cache-policy-basic.yaml @@ -0,0 +1,12 @@ +apiVersion: k8s.nginx.org/v1 +kind: Policy +metadata: + name: cache-policy-basic +spec: + cache: + cacheZoneName: "basiccache" + cacheZoneSize: "5m" + allowedCodes: ["any"] + allowedMethods: ["GET"] + time: "30m" + overrideUpstreamCache: true diff --git a/tests/data/cache-policy/policies/cache-policy-purge.yaml b/tests/data/cache-policy/policies/cache-policy-purge.yaml new file mode 100644 index 0000000000..9e6a34d4fe --- /dev/null +++ b/tests/data/cache-policy/policies/cache-policy-purge.yaml @@ -0,0 +1,12 @@ +apiVersion: k8s.nginx.org/v1 +kind: Policy +metadata: + name: cache-policy-purge +spec: + cache: + cacheZoneName: "mycache" + cacheZoneSize: "5m" + allowedCodes: ["any"] + time: "10m" + overrideUpstreamCache: true + cachePurgeAllow: ["0.0.0.0/0"] diff --git a/tests/data/cache-policy/route/virtual-server-cache-policy-advanced-route.yaml b/tests/data/cache-policy/route/virtual-server-cache-policy-advanced-route.yaml new file mode 100644 index 0000000000..3bf79d2ad2 --- /dev/null +++ b/tests/data/cache-policy/route/virtual-server-cache-policy-advanced-route.yaml @@ -0,0 +1,24 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: virtual-server +spec: + server-snippets: | + add_header X-Cache-Status $upstream_cache_status; + host: virtual-server.example.com + upstreams: + - name: backend2 + service: backend2-svc + port: 80 + - name: backend1 + service: backend1-svc + port: 80 + routes: + - path: "/backend1" + policies: + - name: cache-policy-advanced + action: + pass: backend1 + - path: "/backend2" + action: + pass: backend2 diff --git a/tests/data/cache-policy/route/virtual-server-cache-policy-basic-route.yaml b/tests/data/cache-policy/route/virtual-server-cache-policy-basic-route.yaml new file mode 100644 index 0000000000..11f1877a0d --- /dev/null +++ b/tests/data/cache-policy/route/virtual-server-cache-policy-basic-route.yaml @@ -0,0 +1,24 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: virtual-server +spec: + server-snippets: | + add_header X-Cache-Status $upstream_cache_status; + host: virtual-server.example.com + upstreams: + - name: backend2 + service: backend2-svc + port: 80 + - name: backend1 + service: backend1-svc + port: 80 + routes: + - path: "/backend1" + policies: + - name: cache-policy-basic + action: + pass: backend1 + - path: "/backend2" + action: + pass: backend2 diff --git a/tests/data/cache-policy/spec/virtual-server-cache-policy-advanced-spec.yaml b/tests/data/cache-policy/spec/virtual-server-cache-policy-advanced-spec.yaml new file mode 100644 index 0000000000..1b13317837 --- /dev/null +++ b/tests/data/cache-policy/spec/virtual-server-cache-policy-advanced-spec.yaml @@ -0,0 +1,24 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: virtual-server +spec: + server-snippets: | + add_header X-Cache-Status $upstream_cache_status; + host: virtual-server.example.com + policies: + - name: cache-policy-advanced + upstreams: + - name: backend2 + service: backend2-svc + port: 80 + - name: backend1 + service: backend1-svc + port: 80 + routes: + - path: 
"/backend1" + action: + pass: backend1 + - path: "/backend2" + action: + pass: backend2 diff --git a/tests/data/cache-policy/spec/virtual-server-cache-policy-basic-spec.yaml b/tests/data/cache-policy/spec/virtual-server-cache-policy-basic-spec.yaml new file mode 100644 index 0000000000..4c8b3f62c9 --- /dev/null +++ b/tests/data/cache-policy/spec/virtual-server-cache-policy-basic-spec.yaml @@ -0,0 +1,24 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: virtual-server +spec: + server-snippets: | + add_header X-Cache-Status $upstream_cache_status; + host: virtual-server.example.com + policies: + - name: cache-policy-basic + upstreams: + - name: backend2 + service: backend2-svc + port: 80 + - name: backend1 + service: backend1-svc + port: 80 + routes: + - path: "/backend1" + action: + pass: backend1 + - path: "/backend2" + action: + pass: backend2 diff --git a/tests/data/cache-policy/spec/virtual-server-cache-policy-purge.yaml b/tests/data/cache-policy/spec/virtual-server-cache-policy-purge.yaml new file mode 100644 index 0000000000..d65da8633e --- /dev/null +++ b/tests/data/cache-policy/spec/virtual-server-cache-policy-purge.yaml @@ -0,0 +1,24 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: virtual-server +spec: + server-snippets: | + add_header X-Cache-Status $upstream_cache_status; + host: virtual-server.example.com + policies: + - name: cache-policy-purge + upstreams: + - name: backend2 + service: backend2-svc + port: 80 + - name: backend1 + service: backend1-svc + port: 80 + routes: + - path: "/backend1" + action: + pass: backend1 + - path: "/backend2" + action: + pass: backend2 diff --git a/tests/data/cache-policy/vsr/virtual-server-route-cache-policy-advanced.yaml b/tests/data/cache-policy/vsr/virtual-server-route-cache-policy-advanced.yaml new file mode 100644 index 0000000000..caf52d05f1 --- /dev/null +++ b/tests/data/cache-policy/vsr/virtual-server-route-cache-policy-advanced.yaml @@ -0,0 +1,22 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServerRoute +metadata: + name: backends +spec: + host: virtual-server-route.example.com + upstreams: + - name: backend1 + service: backend1-svc + port: 80 + - name: backend3 + service: backend3-svc + port: 80 + subroutes: + - path: "/backends/backend1" + policies: + - name: cache-policy-advanced + action: + pass: backend1 + - path: "/backends/backend3" + action: + pass: backend3 diff --git a/tests/data/cache-policy/vsr/virtual-server-route-cache-policy-basic.yaml b/tests/data/cache-policy/vsr/virtual-server-route-cache-policy-basic.yaml new file mode 100644 index 0000000000..f479fc4cbc --- /dev/null +++ b/tests/data/cache-policy/vsr/virtual-server-route-cache-policy-basic.yaml @@ -0,0 +1,22 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServerRoute +metadata: + name: backends +spec: + host: virtual-server-route.example.com + upstreams: + - name: backend1 + service: backend1-svc + port: 80 + - name: backend3 + service: backend3-svc + port: 80 + subroutes: + - path: "/backends/backend1" + policies: + - name: cache-policy-basic + action: + pass: backend1 + - path: "/backends/backend3" + action: + pass: backend3 diff --git a/tests/data/cache-policy/vsr/virtual-server-route-cache-policy-purge.yaml b/tests/data/cache-policy/vsr/virtual-server-route-cache-policy-purge.yaml new file mode 100644 index 0000000000..d380be2e76 --- /dev/null +++ b/tests/data/cache-policy/vsr/virtual-server-route-cache-policy-purge.yaml @@ -0,0 +1,22 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServerRoute +metadata: + name: 
backends +spec: + host: virtual-server-route.example.com + upstreams: + - name: backend1 + service: backend1-svc + port: 80 + - name: backend3 + service: backend3-svc + port: 80 + subroutes: + - path: "/backends/backend1" + policies: + - name: cache-policy-purge + action: + pass: backend1 + - path: "/backends/backend3" + action: + pass: backend3 diff --git a/tests/data/cache-policy/vsr/virtual-server.yaml b/tests/data/cache-policy/vsr/virtual-server.yaml new file mode 100644 index 0000000000..9f257c4c13 --- /dev/null +++ b/tests/data/cache-policy/vsr/virtual-server.yaml @@ -0,0 +1,13 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: virtual-server-route +spec: + server-snippets: | + add_header X-Cache-Status $upstream_cache_status; + host: virtual-server-route.example.com + routes: + - path: "/backends" + route: backends # implicit namespace + - path: "/backend2" + route: backend2-namespace/backend2 diff --git a/tests/suite/test_cache_policies_vs.py b/tests/suite/test_cache_policies_vs.py new file mode 100644 index 0000000000..4ac7240265 --- /dev/null +++ b/tests/suite/test_cache_policies_vs.py @@ -0,0 +1,287 @@ +import re + +import pytest +import requests +from settings import TEST_DATA +from suite.utils.policy_resources_utils import create_policy_from_yaml, delete_policy +from suite.utils.resources_utils import ensure_connection_to_public_endpoint, pod_restart, wait_before_test +from suite.utils.vs_vsr_resources_utils import delete_and_create_vs_from_yaml + +std_vs_src = f"{TEST_DATA}/virtual-server/standard/virtual-server.yaml" +cache_pol_basic_src = f"{TEST_DATA}/cache-policy/policies/cache-policy-basic.yaml" +cache_pol_advanced_src = f"{TEST_DATA}/cache-policy/policies/cache-policy-advanced.yaml" +cache_pol_purge_src = f"{TEST_DATA}/cache-policy/policies/cache-policy-purge.yaml" +cache_vs_basic_spec_src = f"{TEST_DATA}/cache-policy/spec/virtual-server-cache-policy-basic-spec.yaml" +cache_vs_advanced_spec_src = f"{TEST_DATA}/cache-policy/spec/virtual-server-cache-policy-advanced-spec.yaml" +cache_vs_basic_route_src = f"{TEST_DATA}/cache-policy/route/virtual-server-cache-policy-basic-route.yaml" +cache_vs_advanced_route_src = f"{TEST_DATA}/cache-policy/route/virtual-server-cache-policy-advanced-route.yaml" +cache_vs_purge_src = f"{TEST_DATA}/cache-policy/spec/virtual-server-cache-policy-purge.yaml" + + +@pytest.mark.policies +@pytest.mark.policies_cache +@pytest.mark.parametrize( + "crd_ingress_controller, virtual_server_setup", + [ + ( + { + "type": "complete", + "extra_args": [f"-enable-custom-resources", f"-enable-leader-election=false", f"-enable-snippets"], + }, + { + "example": "virtual-server", + "app_type": "simple", + }, + ) + ], + indirect=True, +) +class TestCachePolicies: + def setup_cache_policy(self, kube_apis, test_namespace, policy_src): + print(f"Create cache policy") + pol_name = create_policy_from_yaml(kube_apis.custom_objects, policy_src, test_namespace) + wait_before_test() + return pol_name + + @pytest.mark.parametrize("src", [cache_vs_basic_spec_src, cache_vs_basic_route_src]) + def test_cache_policy_basic( + self, + kube_apis, + ingress_controller_prerequisites, + ingress_controller_endpoint, + crd_ingress_controller, + virtual_server_setup, + test_namespace, + src, + ): + """ + Test cache policy basic (GET only) configured at spec and route level + """ + + pol_name = self.setup_cache_policy(kube_apis, test_namespace, cache_pol_basic_src) + + # Apply VS with basic cache policy at spec level + delete_and_create_vs_from_yaml( + 
kube_apis.custom_objects, + virtual_server_setup.vs_name, + src, + test_namespace, + ) + + # Test cache behavior for GET requests + # First request should populate cache + resp_1 = requests.get(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}) + cache_status_1 = resp_1.headers.get("X-Cache-Status") + + # Second request should return cached content (same Request ID) + resp_2 = requests.get(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}) + cache_status_2 = resp_2.headers.get("X-Cache-Status") + + # POST requests should not be cached (different Request IDs expected) + resp_3 = requests.post(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}) + cache_status_3 = resp_3.headers.get("X-Cache-Status") + + resp_4 = requests.post(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}) + cache_status_4 = resp_4.headers.get("X-Cache-Status") + + # Extract Request IDs from response body + req_id_1 = re.search(r"Request ID: (\S+)", resp_1.text) + req_id_2 = re.search(r"Request ID: (\S+)", resp_2.text) + req_id_3 = re.search(r"Request ID: (\S+)", resp_3.text) + req_id_4 = re.search(r"Request ID: (\S+)", resp_4.text) + + assert all( + [ + resp_1.status_code == 200, + resp_2.status_code == 200, + resp_3.status_code == 200, + resp_4.status_code == 200, + "Request ID:" in resp_1.text, # Verify response body contains Request ID + "Request ID:" in resp_2.text, + "Request ID:" in resp_3.text, + "Request ID:" in resp_4.text, + ( + req_id_1.group(1) == req_id_2.group(1) if req_id_1 and req_id_2 else False + ), # GET requests cached (same Request ID) + ( + req_id_3.group(1) != req_id_4.group(1) if req_id_3 and req_id_4 else True + ), # POST requests not cached (different Request IDs) + cache_status_1 in ["MISS", "EXPIRED"], # First GET should be cache miss + cache_status_2 == "HIT", # Second GET should be cache hit + cache_status_3 in ["MISS", "EXPIRED", None], # POST should not be cached or use cached entry + cache_status_4 in ["MISS", "EXPIRED", None], # POST should not be cached + ] + ) + + delete_policy(kube_apis.custom_objects, pol_name, test_namespace) + delete_and_create_vs_from_yaml( + kube_apis.custom_objects, virtual_server_setup.vs_name, std_vs_src, test_namespace + ) + ns = ingress_controller_prerequisites.namespace + # Purge all existing cache entries by removing pods + pod_restart(kube_apis.v1, ns) + ensure_connection_to_public_endpoint( + ingress_controller_endpoint.public_ip, + ingress_controller_endpoint.port, + ingress_controller_endpoint.port_ssl, + ) + + @pytest.mark.parametrize("src", [cache_vs_advanced_spec_src, cache_vs_advanced_route_src]) + def test_cache_policy_advanced( + self, + kube_apis, + ingress_controller_prerequisites, + ingress_controller_endpoint, + crd_ingress_controller, + virtual_server_setup, + test_namespace, + src, + ): + """ + Test cache policy advanced (GET/HEAD/POST) configured at spec and route level + """ + + pol_name = self.setup_cache_policy(kube_apis, test_namespace, cache_pol_advanced_src) + + delete_and_create_vs_from_yaml( + kube_apis.custom_objects, + virtual_server_setup.vs_name, + src, + test_namespace, + ) + + # Test cache behavior for GET requests + resp_1 = requests.get(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}) + cache_status_1 = resp_1.headers.get("X-Cache-Status") + + resp_2 = requests.get(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}) + 
cache_status_2 = resp_2.headers.get("X-Cache-Status") + + # Test cache behavior for POST requests + resp_3 = requests.post(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}) + cache_status_3 = resp_3.headers.get("X-Cache-Status") + + # Test cache behavior for HEAD requests + resp_4 = requests.head(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}) + cache_status_4 = resp_4.headers.get("X-Cache-Status") + + # Extract Request IDs from response body (HEAD responses don't have body, use GET and POST) + req_id_1 = re.search(r"Request ID: (\S+)", resp_1.text) + req_id_2 = re.search(r"Request ID: (\S+)", resp_2.text) + req_id_3 = re.search(r"Request ID: (\S+)", resp_3.text) + + assert all( + [ + resp_1.status_code == 200, + resp_2.status_code == 200, + resp_3.status_code == 200, + resp_4.status_code == 200, + "Request ID:" in resp_1.text, + "Request ID:" in resp_2.text, + "Request ID:" in resp_3.text, + req_id_1.group(1) == req_id_2.group(1) == req_id_3.group(1), + cache_status_1 in ["MISS", "EXPIRED", None], + cache_status_2 == "HIT", + cache_status_3 == "HIT", + cache_status_4 == "HIT", + ] + ) + + # Cleanup + delete_policy(kube_apis.custom_objects, pol_name, test_namespace) + delete_and_create_vs_from_yaml( + kube_apis.custom_objects, virtual_server_setup.vs_name, std_vs_src, test_namespace + ) + + ns = ingress_controller_prerequisites.namespace + # Purge all existing cache entries by removing pods + pod_restart(kube_apis.v1, ns) + ensure_connection_to_public_endpoint( + ingress_controller_endpoint.public_ip, + ingress_controller_endpoint.port, + ingress_controller_endpoint.port_ssl, + ) + + @pytest.mark.skip_for_nginx_oss + def test_cache_policy_purge( + self, + kube_apis, + ingress_controller_prerequisites, + ingress_controller_endpoint, + crd_ingress_controller, + virtual_server_setup, + test_namespace, + ): + """ + Test cache policy purge + """ + + pol_name = self.setup_cache_policy(kube_apis, test_namespace, cache_pol_purge_src) + + delete_and_create_vs_from_yaml( + kube_apis.custom_objects, + virtual_server_setup.vs_name, + cache_vs_purge_src, + test_namespace, + ) + + # Test cache behavior for GET requests + resp_1 = requests.get(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}) + cache_status_1 = resp_1.headers.get("X-Cache-Status") + + resp_2 = requests.get(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}) + cache_status_2 = resp_2.headers.get("X-Cache-Status") + + # Purge request to remove cached content + # geo $purge_allowed_test_namespace_virtual_server_mycache { + # default 0; + # 0.0.0.0/0 1; + # } + resp_purge = requests.request( + "PURGE", virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host} + ) + + resp_3 = requests.get(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}) + cache_status_3 = resp_3.headers.get("X-Cache-Status") + + resp_4 = requests.get(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}) + cache_status_4 = resp_4.headers.get("X-Cache-Status") + + # Extract Request IDs from response body (HEAD responses don't have body, use GET and POST) + req_id_1 = re.search(r"Request ID: (\S+)", resp_1.text) + req_id_2 = re.search(r"Request ID: (\S+)", resp_2.text) + req_id_3 = re.search(r"Request ID: (\S+)", resp_3.text) + req_id_4 = re.search(r"Request ID: (\S+)", resp_4.text) + + assert all( + [ + resp_1.status_code == 200, + resp_2.status_code == 
200, + resp_purge.status_code == 204, # PURGE should return 204 No Content + resp_3.status_code == 200, + resp_4.status_code == 200, + "Request ID:" in resp_1.text, + "Request ID:" in resp_2.text, + "Request ID:" in resp_3.text, + "Request ID:" in resp_4.text, + req_id_1.group(1) == req_id_2.group(1), + req_id_3.group(1) == req_id_4.group(1), + cache_status_1 == "MISS", + cache_status_2 == "HIT", + cache_status_3 == "MISS", # after PURGE, should be MISS + cache_status_4 == "HIT", + ] + ) + + delete_policy(kube_apis.custom_objects, pol_name, test_namespace) + delete_and_create_vs_from_yaml( + kube_apis.custom_objects, virtual_server_setup.vs_name, std_vs_src, test_namespace + ) + ns = ingress_controller_prerequisites.namespace + # Purge all existing cache entries by removing pods + pod_restart(kube_apis.v1, ns) + ensure_connection_to_public_endpoint( + ingress_controller_endpoint.public_ip, + ingress_controller_endpoint.port, + ingress_controller_endpoint.port_ssl, + ) diff --git a/tests/suite/test_cache_policies_vsr.py b/tests/suite/test_cache_policies_vsr.py new file mode 100644 index 0000000000..1947a00775 --- /dev/null +++ b/tests/suite/test_cache_policies_vsr.py @@ -0,0 +1,219 @@ +import re + +import pytest +import requests +from settings import TEST_DATA +from suite.utils.policy_resources_utils import create_policy_from_yaml, delete_policy +from suite.utils.resources_utils import ensure_connection_to_public_endpoint, pod_restart, wait_before_test +from suite.utils.vs_vsr_resources_utils import delete_and_create_v_s_route_from_yaml, delete_and_create_vs_from_yaml + +std_vsr_src = f"{TEST_DATA}/virtual-server-route/route-multiple.yaml" +cache_pol_basic_src = f"{TEST_DATA}/cache-policy/policies/cache-policy-basic.yaml" +cache_pol_advanced_src = f"{TEST_DATA}/cache-policy/policies/cache-policy-advanced.yaml" +cache_vs_vsr_src = f"{TEST_DATA}/cache-policy/vsr/virtual-server.yaml" +cache_vsr_basic_src = f"{TEST_DATA}/cache-policy/vsr/virtual-server-route-cache-policy-basic.yaml" +cache_vsr_advanced_src = f"{TEST_DATA}/cache-policy/vsr/virtual-server-route-cache-policy-advanced.yaml" + + +@pytest.mark.policies +@pytest.mark.policies_cache +@pytest.mark.parametrize( + "crd_ingress_controller, v_s_route_setup", + [ + ( + { + "type": "complete", + "extra_args": [ + f"-enable-custom-resources", + f"-enable-leader-election=false", + f"-enable-snippets", + ], + }, + {"example": "virtual-server-route"}, + ) + ], + indirect=True, +) +class TestCachePoliciesVSR: + + def setup_vs_cache_policy(self, kube_apis, namespace, policy_src, vs_name): + print(f"Create cache policy") + pol_name = create_policy_from_yaml(kube_apis.custom_objects, policy_src, namespace) + print("Update Virtual Server with snippets") + delete_and_create_vs_from_yaml(kube_apis.custom_objects, vs_name, cache_vs_vsr_src, namespace) + wait_before_test() + return pol_name + + def test_cache_policy_vsr_basic( + self, + kube_apis, + ingress_controller_prerequisites, + ingress_controller_endpoint, + crd_ingress_controller, + v_s_route_app_setup, + v_s_route_setup, + test_namespace, + ): + """ + Test cache policy basic (GET only) applied to VirtualServerRoute + """ + + req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" + pol_name = self.setup_vs_cache_policy( + kube_apis, v_s_route_setup.route_m.namespace, cache_pol_basic_src, v_s_route_setup.vs_name + ) + + print(f"VSR with basic cache policy: {cache_vsr_basic_src}") + delete_and_create_v_s_route_from_yaml( + kube_apis.custom_objects, + 
v_s_route_setup.route_m.name, + cache_vsr_basic_src, + v_s_route_setup.route_m.namespace, + ) + wait_before_test() + + # Test cache behavior for GET requests on subroute + resp_1 = requests.get(f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host}) + cache_status_1 = resp_1.headers.get("X-Cache-Status") + print(f"Cache status for first GET request: {cache_status_1}") + + resp_2 = requests.get(f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host}) + cache_status_2 = resp_2.headers.get("X-Cache-Status") + print(f"Cache status for second GET request: {cache_status_2}") + + # POST requests should not be cached (basic policy allows GET only) + resp_3 = requests.post( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host} + ) + cache_status_3 = resp_3.headers.get("X-Cache-Status") + print(f"Cache status for POST request: {cache_status_3}") + + resp_4 = requests.post( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host} + ) + cache_status_4 = resp_4.headers.get("X-Cache-Status") + print(f"Cache status for second POST request: {cache_status_4}") + + # Extract Request IDs from response body + req_id_1 = re.search(r"Request ID: (\S+)", resp_1.text) + req_id_2 = re.search(r"Request ID: (\S+)", resp_2.text) + req_id_3 = re.search(r"Request ID: (\S+)", resp_3.text) + req_id_4 = re.search(r"Request ID: (\S+)", resp_4.text) + + assert all( + [ + resp_1.status_code == 200, + resp_2.status_code == 200, + resp_3.status_code == 200, + resp_4.status_code == 200, + "Request ID:" in resp_1.text, + "Request ID:" in resp_2.text, + "Request ID:" in resp_3.text, + "Request ID:" in resp_4.text, + req_id_1.group(1) == req_id_2.group(1), # GET requests cached (same Request ID) + req_id_3.group(1) != req_id_4.group(1), # POST requests not cached (different Request IDs) + cache_status_1 in ["MISS", "EXPIRED"], + cache_status_2 == "HIT", + cache_status_3 in ["MISS", "EXPIRED", None], + cache_status_4 in ["MISS", "EXPIRED", None], + ] + ) + + delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace) + delete_and_create_v_s_route_from_yaml( + kube_apis.custom_objects, v_s_route_setup.route_m.name, std_vsr_src, v_s_route_setup.route_m.namespace + ) + ns = ingress_controller_prerequisites.namespace + # Purge all existing cache entries by removing pods + pod_restart(kube_apis.v1, ns) + ensure_connection_to_public_endpoint( + ingress_controller_endpoint.public_ip, + ingress_controller_endpoint.port, + ingress_controller_endpoint.port_ssl, + ) + + def test_cache_policy_vsr_advanced( + self, + kube_apis, + ingress_controller_prerequisites, + ingress_controller_endpoint, + crd_ingress_controller, + v_s_route_app_setup, + v_s_route_setup, + test_namespace, + ): + """ + Test cache policy advanced (GET/HEAD/POST) applied to VirtualServerRoute + """ + + req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" + pol_name = self.setup_vs_cache_policy( + kube_apis, v_s_route_setup.route_m.namespace, cache_pol_advanced_src, v_s_route_setup.vs_name + ) + + print(f"VSR with advanced cache policy: {cache_vsr_advanced_src}") + delete_and_create_v_s_route_from_yaml( + kube_apis.custom_objects, + v_s_route_setup.route_m.name, + cache_vsr_advanced_src, + v_s_route_setup.route_m.namespace, + ) + wait_before_test() + + # Test cache behavior for GET requests + resp_1 = requests.get(f"{req_url}{v_s_route_setup.route_m.paths[0]}", 
headers={"host": v_s_route_setup.vs_host}) + cache_status_1 = resp_1.headers.get("X-Cache-Status") + print(f"Cache status for first GET request: {cache_status_1}") + + resp_2 = requests.get(f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host}) + cache_status_2 = resp_2.headers.get("X-Cache-Status") + print(f"Cache status for second GET request: {cache_status_2}") + + # Test cache behavior for POST requests (should be cached with advanced policy) + resp_3 = requests.post( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host} + ) + cache_status_3 = resp_3.headers.get("X-Cache-Status") + print(f"Cache status for first POST request: {cache_status_3}") + + # Test cache behavior for HEAD requests + resp_4 = requests.head( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", headers={"host": v_s_route_setup.vs_host} + ) + cache_status_4 = resp_4.headers.get("X-Cache-Status") + print(f"Cache status for first HEAD request: {cache_status_4}") + + # Extract Request IDs from response body + req_id_1 = re.search(r"Request ID: (\S+)", resp_1.text) + req_id_2 = re.search(r"Request ID: (\S+)", resp_2.text) + req_id_3 = re.search(r"Request ID: (\S+)", resp_3.text) + + assert all( + [ + resp_1.status_code == 200, + resp_2.status_code == 200, + resp_3.status_code == 200, + resp_4.status_code == 200, + "Request ID:" in resp_1.text, + "Request ID:" in resp_2.text, + "Request ID:" in resp_3.text, + req_id_1.group(1) == req_id_2.group(1) == req_id_3.group(1), + cache_status_1 in ["MISS", "EXPIRED", None], + cache_status_2 == "HIT", + cache_status_3 == "HIT", + cache_status_4 == "HIT", + ] + ) + + delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace) + delete_and_create_v_s_route_from_yaml( + kube_apis.custom_objects, v_s_route_setup.route_m.name, std_vsr_src, v_s_route_setup.route_m.namespace + ) + ns = ingress_controller_prerequisites.namespace + # Purge all existing cache entries by removing pods + pod_restart(kube_apis.v1, ns) + ensure_connection_to_public_endpoint( + ingress_controller_endpoint.public_ip, + ingress_controller_endpoint.port, + ingress_controller_endpoint.port_ssl, + ) diff --git a/tests/suite/utils/resources_utils.py b/tests/suite/utils/resources_utils.py index 9a25cc0aea..4ec4072df1 100644 --- a/tests/suite/utils/resources_utils.py +++ b/tests/suite/utils/resources_utils.py @@ -2062,3 +2062,25 @@ def read_ingress(v1: NetworkingV1Api, name, namespace) -> V1Ingress: """ print(f"Read an ingress named '{name}'") return v1.read_namespaced_ingress(name, namespace) + + +def pod_restart(v1: CoreV1Api, namespace): + """ + Restart all pods in a deployment. + """ + try: + pods = v1.list_namespaced_pod(namespace=namespace) + + print(f"Found {len(pods.items)} pods to restart") + + # Delete all pods (they will be recreated by deployment) + for pod in pods.items: + print(f"Deleting pod {pod.metadata.name}") + v1.delete_namespaced_pod(name=pod.metadata.name, namespace=namespace) + + wait_until_all_pods_are_ready(v1, namespace) + print("Pod restart complete") + + except Exception as e: + print(f"Error in pod restart: {e}") + raise e