Closed

33 commits
05cee51
Revised initial commit for cluster hardening leveraging the complianc…
Sep 4, 2025
cc6b3ce
Updated the ScanSetting and ScanSettingBinding yaml to not stick stuf…
Sep 4, 2025
4e181be
Removed resource limits from scan bindings and result viewer
Sep 4, 2025
20dcc46
Removed extra line from values.yaml
Sep 4, 2025
7d5ee8f
Removed extra api refs
Sep 4, 2025
a3a7053
Modified calling of single profile for scansettingbinding
Sep 4, 2025
fff995d
Removed apiGroup from profiles
Sep 4, 2025
971563c
Removed apiVersion from settingsRef
Sep 4, 2025
8f0755d
Removed tolerations from result viewer
Sep 4, 2025
123af1d
Updated to use the rhcos4-stig profile
Sep 4, 2025
88be04a
Setting a daily 0200 cron to see if I can get setting to run on in…
Sep 4, 2025
e2dfaf3
Updated comment for schedule field
Sep 4, 2025
732c8bf
Updated description and reintroduced apiGroup elements in scan-sett…
Sep 5, 2025
e4e1fbb
Updated cron format for schedule
Sep 5, 2025
06095df
Added roles for master and worker to scansetting since they are not i…
Sep 5, 2025
f168626
Fixed formatting issue on roles
Sep 5, 2025
bbc2021
Removing schedule cron entry so it only runs on deployment of pattern
Sep 5, 2025
5992d24
Used Cursor to generate a job that waits on execution of compliancesc…
Sep 5, 2025
d8bfb6d
Needs to run as root. We can revisit this in review
Sep 5, 2025
202ada7
Updated image refs to ubi9 unauthenticated access container images
Sep 5, 2025
a891903
Removed cursor generated securitycontext so we inherit from default c…
Sep 5, 2025
63adfa5
Updated remediation job to not try and dnf install jq and kubectl but…
Sep 5, 2025
4ef6068
Modified the image to pull from older quay vp registry org
Sep 5, 2025
821a784
Updated the compliancescan naming convention to be profile-role
Sep 5, 2025
4145d90
Fixed while loop logic to get compliancescans correctly
Sep 5, 2025
71ca8c0
Removed brackets around while
Sep 5, 2025
f9caa15
Script fixes for value checks in remediation job
Sep 5, 2025
d45f55f
Updated bash script to patch the complianceremediations CRs with spe…
Sep 5, 2025
187763d
Updated chart version
Sep 8, 2025
d5d6a1d
Removed env setting for bash script access to KUBECONFIG based on dis…
Sep 8, 2025
71b7522
Removed reasonApplied for kubectl create event at end of bash script …
Sep 8, 2025
96ece4f
Removed the autoremediation annotation on scansettingbinding per disc…
Sep 8, 2025
e037fdb
Removed the result viewer pod since user can see results directly for…
Sep 8, 2025
11 changes: 11 additions & 0 deletions charts/compliance-scanning/Chart.yaml
@@ -0,0 +1,11 @@
apiVersion: v2
description: compliance-scanning performs hardening of an OCP cluster via predefined profile(s)
keywords:
- pattern
- compliance
- zero trust
- hardening
name: compliance-scanning
type: application
icon: https://validatedpatterns.io/images/validated-patterns.png
version: 0.0.3
61 changes: 61 additions & 0 deletions charts/compliance-scanning/templates/_helpers.tpl
@@ -0,0 +1,61 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "compliance-scanning.name" -}}
{{- default .Chart.Name .Values.nameOverride }}
{{- end }}

{{/*
Create a default fully qualified app name.
If release name contains chart name it will be used as a full name.
*/}}
{{- define "compliance-scanning.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "compliance-scanning.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "compliance-scanning.labels" -}}
helm.sh/chart: {{ include "compliance-scanning.chart" . }}
{{ include "compliance-scanning.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "compliance-scanning.selectorLabels" -}}
app.kubernetes.io/name: {{ include "compliance-scanning.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "compliance-scanning.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "compliance-scanning.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
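
None of the templates in this diff actually call these helpers; the manifests below hard-code their names and labels. If they were wired in, typical usage would look like this sketch (not part of the PR):

metadata:
  name: {{ include "compliance-scanning.fullname" . }}
  labels:
    {{- include "compliance-scanning.labels" . | nindent 4 }}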
20 changes: 20 additions & 0 deletions charts/compliance-scanning/templates/pvc.yaml
@@ -0,0 +1,20 @@
{{- if .Values.compliance.storage.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Values.compliance.storage.pvc.name }}
namespace: openshift-compliance
annotations:
argocd.argoproj.io/sync-wave: '-10'
labels:
app.kubernetes.io/component: storage
spec:
accessModes:
- {{ .Values.compliance.storage.pvc.accessMode }}
resources:
requests:
storage: {{ .Values.compliance.storage.pvc.size }}
{{- if .Values.compliance.storage.pvc.storageClass }}
storageClassName: {{ .Values.compliance.storage.pvc.storageClass }}
{{- end }}
{{- end }}
206 changes: 206 additions & 0 deletions charts/compliance-scanning/templates/remediation-job.yaml
@@ -0,0 +1,206 @@
# ServiceAccount for the remediation job
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.compliance.remediationJob.serviceAccount.name }}
namespace: openshift-compliance
annotations:
argocd.argoproj.io/sync-wave: '0'
labels:
app.kubernetes.io/component: compliance-remediation-job
---
# ClusterRole with permissions to manage compliance resources
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
Contributor:

Does this need to be a ClusterRole? If all the objects you're touching are in openshift-compliance, it might not need to be.

Author:

It may not need to be. Setting it that way shows my ignorance of the remediation controller within the Compliance Operator. I did scope all the elements to the openshift-compliance namespace, but I don't know whether namespace-scoped auth is sufficient for something that can reboot nodes during remediation.
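
For reference, a namespace-scoped variant would look like the sketch below (an untested assumption: the job itself only reads scans and patches remediations in openshift-compliance, while node reboots are driven by the operator's own controllers rather than this job's credentials):

# Hypothetical namespace-scoped alternative (untested)
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ .Values.compliance.remediationJob.clusterRole.name }}
  namespace: openshift-compliance
rules:
- apiGroups: ["compliance.openshift.io"]
  resources: ["compliancescans"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["compliance.openshift.io"]
  resources: ["complianceremediations"]
  verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ .Values.compliance.remediationJob.clusterRoleBinding.name }}
  namespace: openshift-compliance
subjects:
- kind: ServiceAccount
  name: {{ .Values.compliance.remediationJob.serviceAccount.name }}
  namespace: openshift-compliance
roleRef:
  kind: Role
  name: {{ .Values.compliance.remediationJob.clusterRole.name }}
  apiGroup: rbac.authorization.k8s.io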

metadata:
name: {{ .Values.compliance.remediationJob.clusterRole.name }}
annotations:
argocd.argoproj.io/sync-wave: '0'
labels:
app.kubernetes.io/component: compliance-remediation-job
rules:
- apiGroups: ["compliance.openshift.io"]
resources: ["compliancescans"]
verbs: ["get", "list", "watch"]
- apiGroups: ["compliance.openshift.io"]
resources: ["complianceremediations"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# ClusterRoleBinding to bind the role to the service account
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Values.compliance.remediationJob.clusterRoleBinding.name }}
annotations:
argocd.argoproj.io/sync-wave: '0'
labels:
app.kubernetes.io/component: compliance-remediation-job
subjects:
- kind: ServiceAccount
name: {{ .Values.compliance.remediationJob.serviceAccount.name }}
namespace: openshift-compliance
roleRef:
kind: ClusterRole
name: {{ .Values.compliance.remediationJob.clusterRole.name }}
apiGroup: rbac.authorization.k8s.io
---
# Job to wait for scan completion and apply remediations
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Values.compliance.remediationJob.name }}
namespace: openshift-compliance
annotations:
argocd.argoproj.io/sync-wave: '10'
labels:
app.kubernetes.io/component: compliance-remediation-job
spec:
template:
metadata:
labels:
app.kubernetes.io/component: compliance-remediation-job
spec:
serviceAccountName: {{ .Values.compliance.remediationJob.serviceAccount.name }}
restartPolicy: OnFailure
containers:
- name: remediation-applier
image: {{ .Values.compliance.remediationJob.image }}
command: ["/bin/bash"]
args:
- -c
- |
set -Eeuxo pipefail

echo "Starting compliance remediation job..."
echo "Scan Setting Binding: {{ .Values.compliance.scanSettingBinding.name }}"
echo "Profile: {{ .Values.compliance.scanSettingBinding.profile }}"

# The ComplianceScan name is typically generated from the binding name and profile
# Format: <profile-name>-<role>
SCAN_NAME="{{ .Values.compliance.scanSettingBinding.profile }}-worker"
NAMESPACE="openshift-compliance"
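# NOTE: the ScanSetting also targets the master role, but this job only gates on
# the worker scan; master remediations may still be Pending when it runs.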

echo "Waiting for ComplianceScan: $SCAN_NAME to complete..."

# Wait for the scan object to exist first
max_wait_for_scan=600 # 10 minutes
wait_time=0

until kubectl get compliancescan "$SCAN_NAME" -n "$NAMESPACE" &>/dev/null; do
if [ $wait_time -ge $max_wait_for_scan ]; then
echo "ERROR: ComplianceScan $SCAN_NAME not found after ${max_wait_for_scan}s"
exit 1
fi
echo "Waiting for ComplianceScan $SCAN_NAME to be created... (${wait_time}s elapsed)"
sleep 10
wait_time=$((wait_time + 10))
done

echo "ComplianceScan $SCAN_NAME found. Waiting for completion..."

# Wait for the scan to complete
max_wait=7200 # 2 hours
wait_time=0

while true; do
# Get the scan phase/status
PHASE=$(kubectl get compliancescan "$SCAN_NAME" -n "$NAMESPACE" -o jsonpath='{.status.phase}')

if [ "$PHASE" = "DONE" ]; then
echo "ComplianceScan $SCAN_NAME completed successfully!"
break
elif [ "$PHASE" = "ERROR" ] || [ "$PHASE" = "FAILED" ]; then
echo "ERROR: ComplianceScan $SCAN_NAME failed with phase: $PHASE"
exit 1
elif [ -z "$PHASE" ]; then
echo "ComplianceScan $SCAN_NAME phase is empty, waiting..."
else
echo "ComplianceScan $SCAN_NAME is in phase: $PHASE (waiting...)"
fi

if [ $wait_time -ge $max_wait ]; then
echo "ERROR: ComplianceScan $SCAN_NAME did not complete within ${max_wait}s"
exit 1
fi

sleep 30
wait_time=$((wait_time + 30))
done

echo "Scan completed. Looking for pending ComplianceRemediations..."

# Get all ComplianceRemediation objects with state=Pending and spec.apply=false
PENDING_REMEDIATIONS=$(kubectl get complianceremediations -n "$NAMESPACE" -o json | \
jq -r '.items[] | select(.status.applicationState == "Pending" and .spec.apply == false) | .metadata.name')

if [ -z "$PENDING_REMEDIATIONS" ]; then
echo "No pending ComplianceRemediations found with spec.apply=false"
exit 0
fi

echo "Found pending ComplianceRemediations to apply:"
echo "$PENDING_REMEDIATIONS"

# Apply each remediation by setting spec.apply to true
APPLIED_COUNT=0
for REMEDIATION in $PENDING_REMEDIATIONS; do
echo "Applying remediation: $REMEDIATION"

if kubectl patch complianceremediation "$REMEDIATION" -n "$NAMESPACE" \
--type='merge' \
--patch='{"spec": {"apply": true}}'; then
echo "Successfully applied remediation: $REMEDIATION"
APPLIED_COUNT=$((APPLIED_COUNT + 1))
else
echo "ERROR: Failed to apply remediation: $REMEDIATION"
fi
done

echo "Remediation job completed. Applied $APPLIED_COUNT remediations."

# Record an Event noting the action. kubectl has no "create event" subcommand,
# so create a core/v1 Event object directly; the timestamp suffix keeps reruns
# from colliding on the name.
kubectl create -f - <<EOF || true
apiVersion: v1
kind: Event
metadata:
  name: compliance-remediation-job-$(date +%s)
  namespace: ${NAMESPACE}
type: Normal
reason: RemediationsApplied
message: "Applied ${APPLIED_COUNT} ComplianceRemediations after scan completion"
involvedObject:
  apiVersion: batch/v1
  kind: Job
  name: compliance-remediation-job
  namespace: ${NAMESPACE}
EOF
resources:
{{- toYaml .Values.compliance.remediationJob.resources | nindent 10 }}
{{- if .Values.compliance.remediationJob.securityContext }}
securityContext:
{{- toYaml .Values.compliance.remediationJob.securityContext | nindent 10 }}
{{- end }}
{{- if .Values.compliance.remediationJob.nodeSelector }}
nodeSelector:
{{- toYaml .Values.compliance.remediationJob.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.compliance.remediationJob.tolerations }}
tolerations:
{{- toYaml .Values.compliance.remediationJob.tolerations | nindent 8 }}
{{- end }}
{{- if .Values.compliance.remediationJob.affinity }}
affinity:
{{- toYaml .Values.compliance.remediationJob.affinity | nindent 8 }}
{{- end }}
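
A few spot-checks after the job completes can confirm it did what the script claims. These commands are illustrative and assume the job is named compliance-remediation-job:

# What did the job log?
oc -n openshift-compliance logs job/compliance-remediation-job
# Which remediations were flipped to apply=true, and did they reach Applied?
oc -n openshift-compliance get complianceremediations \
  -o custom-columns=NAME:.metadata.name,APPLY:.spec.apply,STATE:.status.applicationState
# Node-level remediations land as MachineConfigs, so the pools will roll (and may reboot):
oc get machineconfigpool master worker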
19 changes: 19 additions & 0 deletions charts/compliance-scanning/templates/scan-setting-binding.yaml
@@ -0,0 +1,19 @@
apiVersion: compliance.openshift.io/v1alpha1
kind: ScanSettingBinding
metadata:
name: {{ .Values.compliance.scanSettingBinding.name }}
namespace: openshift-compliance
annotations:
argocd.argoproj.io/sync-wave: '-10'
Contributor:

We generally only use sync-waves when not using them leads to errors or other problems with deployments. I am very unfamiliar with the compliance operator, so they may indeed be necessary. But if they aren't, it's better not to have them. (As discussed, we definitely need them in clustergroup, though.)

Author:

This was me trying to ensure that the scan resources were in place before the results viewer and remediation updaters were available.

labels:
app.kubernetes.io/component: compliance-scan-binding
settingsRef:
kind: ScanSetting
apiGroup: compliance.openshift.io/v1alpha1
name: {{ .Values.compliance.scanSetting.name }}
# Profiles to bind to this scan setting
profiles:
- name: {{ .Values.compliance.scanSettingBinding.profile }}
kind: Profile
apiGroup: compliance.openshift.io/v1alpha1
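
Once this binding reconciles, the operator generates a ComplianceSuite plus per-role ComplianceScans named <profile>-<role> (e.g. rhcos4-stig-worker, the exact name the remediation job above waits on). A quick check:

oc -n openshift-compliance get scansettingbindings,compliancesuites,compliancescans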

39 changes: 39 additions & 0 deletions charts/compliance-scanning/templates/scan-setting.yaml
@@ -0,0 +1,39 @@
apiVersion: compliance.openshift.io/v1alpha1
kind: ScanSetting
metadata:
name: {{ .Values.compliance.scanSetting.name }}
namespace: openshift-compliance
annotations:
argocd.argoproj.io/sync-wave: '-10'
labels:
app.kubernetes.io/component: compliance-scan-setting
# Enable automatic application of remediations
autoApplyRemediations: {{ .Values.compliance.scanSetting.autoApplyRemediations }}
# Automatically update remediations when the profile content changes
autoUpdateRemediations: true
# Scanning schedule in cron format (omitted, so the scan runs once at deployment)
{{- if .Values.compliance.scanSetting.schedule }}
schedule: {{ .Values.compliance.scanSetting.schedule | quote }}
{{- end }}
roles:
- master
- worker
# Scanner pod tolerations to run on all nodes including masters
scanTolerations:
{{- toYaml .Values.compliance.scanSetting.scanTolerations | nindent 4 }}
# Node selector for scanner pods (if specified)
{{- if .Values.compliance.scanSetting.nodeSelector }}
nodeSelector:
{{- toYaml .Values.compliance.scanSetting.nodeSelector | nindent 4 }}
{{- end }}
# PVC for storing scan results
{{- if .Values.compliance.storage.enabled }}
rawResultStorage:
pvAccessModes:
- {{ .Values.compliance.storage.pvc.accessMode }}
rotation: 5
size: {{ .Values.compliance.storage.pvc.size }}
{{- if .Values.compliance.storage.pvc.storageClass }}
storageClassName: {{ .Values.compliance.storage.pvc.storageClass }}
{{- end }}
{{- end }}
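
The chart's values.yaml is not part of this diff view. Inferred purely from the template references above, its shape would be roughly the following sketch (all names and defaults here are assumptions, not the PR's actual file):

compliance:
  scanSetting:
    name: default-scan-setting            # assumed
    autoApplyRemediations: false          # assumed
    # schedule: "0 2 * * *"               # omitted per commit bbc2021
    scanTolerations:
      - operator: Exists                  # assumed: tolerate all taints, incl. masters
    # nodeSelector: {}
  scanSettingBinding:
    name: stig-scan-binding               # assumed
    profile: rhcos4-stig                  # per commit 123af1d
  storage:
    enabled: true
    pvc:
      name: compliance-scan-results       # assumed
      accessMode: ReadWriteOnce
      size: 1Gi                           # assumed
      # storageClass: ""
  remediationJob:
    name: compliance-remediation-job
    image: quay.io/example/ubi9-tools:latest   # placeholder; actual ref not shown
    serviceAccount:
      name: compliance-remediation-sa     # assumed
    clusterRole:
      name: compliance-remediation-role   # assumed
    clusterRoleBinding:
      name: compliance-remediation-rb     # assumed
    resources: {}
    # securityContext: {}
    # nodeSelector: {}
    # tolerations: []
    # affinity: {}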