# ===== Config =====
IMAGE ?= eopf-geozarr:dev
# Kubernetes namespace where Argo runs.
# NOTE: keep comments on their own line — with an inline
# "VAR ?= value   # comment" GNU make keeps the spaces before '#'
# as part of the value, so $(NAMESPACE) would carry trailing blanks.
NAMESPACE ?= argo
TPL ?= geozarr-convert-template.yaml
PARAMS ?= params.json
CLUSTER ?= k3s-default

# Runtime param overrides (env > PARAMS file)
STAC_URL ?=
OUTPUT_ZARR ?=
GROUPS ?=

# Abbrev: WF = Workflow name; PVC = PersistentVolumeClaim (<WF>-outpvc)

.PHONY: build load-k3d load-minikube argo-install template apply \
        submit submit-cli submit-api status latest logs-save clean \
        _ensure-dirs fetch-tar run clean-pvc
# Build the image locally.
#   make build            -> WHEEL mode (small), builds linux/amd64
#   make build PORTABLE=1 -> PORTABLE mode (bigger), builds for native arch
# Mode is selected at parse time with ifeq rather than a shell-level if;
# the resulting docker invocations are identical.
build:
ifeq ($(PORTABLE),1)
	@echo "==> Building PORTABLE image for native platform (allows source builds)"
	@docker build --build-arg PORTABLE_BUILD=1 -t $(IMAGE) .
else
	@echo "==> Building WHEEL image for linux/amd64 (prebuilt wheels)"
	@docker buildx build --platform=linux/amd64 --build-arg PORTABLE_BUILD=0 -t $(IMAGE) --load .
endif
9 | 34 |
|
10 |
# Load the image into k3d's containerd (dev clusters).
# Primary path: `k3d image import`. Fallback: stream a docker-save tarball
# into the server node's containerd with `ctr`.
load-k3d:
	k3d image import $(IMAGE) --cluster $(CLUSTER) || \
	( node=$$(docker ps --format '{{.Names}}' | grep $(CLUSTER)-server-0); \
	  docker save $(IMAGE) | docker exec -i $$node ctr -n k8s.io images import - )
|
14 | 39 |
|
15 |
# Build the image inside minikube's Docker daemon (no registry push needed).
# Both commands run in the same shell so the docker-env exports take effect.
load-minikube:
	eval "$$(minikube docker-env)"; \
	docker build -t $(IMAGE) .
|
18 | 43 |
|
19 |
# Install Argo Workflows into $(NAMESPACE) if missing.
# The release is pinned but overridable: make argo-install ARGO_VERSION=v3.7.2
ARGO_VERSION ?= v3.7.1

# Namespace creation uses the dry-run|apply idiom so it is idempotent
# without swallowing real errors the way `2>/dev/null || true` did.
argo-install:
	kubectl create ns $(NAMESPACE) --dry-run=client -o yaml | kubectl apply -f -
	kubectl apply -n $(NAMESPACE) -f https://github.com/argoproj/argo-workflows/releases/download/$(ARGO_VERSION)/install.yaml
	kubectl -n $(NAMESPACE) rollout status deploy/workflow-controller
	kubectl -n $(NAMESPACE) rollout status deploy/argo-server
36 | 50 |
|
37 |
| -status: |
38 |
| - argo list -n argo || true |
39 |
| - kubectl -n argo get wf || true |
40 |
| - kubectl -n argo get pods || true |
# Apply (or update) the WorkflowTemplate, then confirm it exists.
# Chained in a single shell with && — same stop-on-failure behaviour as
# two separate recipe lines.
template:
	kubectl -n $(NAMESPACE) apply -f $(TPL) && \
	kubectl -n $(NAMESPACE) get workflowtemplate geozarr-convert
# Build + load + install + template (one shot).
# Runs the steps as sequential $(MAKE) invocations: as plain prerequisites
# the four targets carry no ordering edges among themselves, so under
# `make -j` load-k3d could race ahead of build.
apply:
	$(MAKE) build
	$(MAKE) load-k3d
	$(MAKE) argo-install
	$(MAKE) template
41 | 58 |
|
42 |
| -logs: |
43 |
| - argo logs -n argo @latest -f |
# Submit via CLI (env overrides STAC_URL/OUTPUT_ZARR/GROUPS win; otherwise
# values come from $(PARAMS)). Saves the workflow JSON and a wide summary
# under runs/.
# Fixes: fail fast when $(PARAMS) is missing, and abort if `argo submit`
# fails instead of continuing with an empty $$WF.
submit: _ensure-dirs
	@test -f $(PARAMS) || { echo "ERROR: $(PARAMS) not found" >&2; exit 1; }
	@STAC="$${STAC_URL:-$$(jq -r '.arguments.parameters[] | select(.name=="stac_url").value' $(PARAMS))}"; \
	OUT="$${OUTPUT_ZARR:-$$(jq -r '.arguments.parameters[] | select(.name=="output_zarr").value' $(PARAMS))}"; \
	GRP="$${GROUPS:-$$(jq -r '.arguments.parameters[] | select(.name=="groups").value' $(PARAMS))}"; \
	echo "Submitting:"; echo "  stac_url=$$STAC"; echo "  output_zarr=$$OUT"; echo "  groups=$$GRP"; \
	WF=$$(argo submit -n $(NAMESPACE) --from workflowtemplate/geozarr-convert \
		-p stac_url="$$STAC" -p output_zarr="$$OUT" -p groups="$$GRP" -o name) || exit 1; \
	TSTAMP=$$(date +%Y%m%d-%H%M%S); \
	argo get -n $(NAMESPACE) $$WF -o json > runs/$${TSTAMP}-$${WF##*/}.json; \
	argo get -n $(NAMESPACE) $$WF --output wide | tee runs/$${TSTAMP}-$${WF##*/}.summary.txt; \
	echo "Workflow: $$WF"
| 71 | + |
# Submit via CLI using only values from $(PARAMS) (no env overrides).
# A small shell helper extracts each parameter so the jq filter is written
# once instead of three times; behaviour is unchanged.
submit-cli: _ensure-dirs
	@get() { jq -r ".arguments.parameters[] | select(.name==\"$$1\").value" $(PARAMS); }; \
	WF=$$(argo submit -n $(NAMESPACE) --from workflowtemplate/geozarr-convert \
		-p stac_url="$$(get stac_url)" \
		-p output_zarr="$$(get output_zarr)" \
		-p groups="$$(get groups)" \
		-o name); \
	TSTAMP=$$(date +%Y%m%d-%H%M%S); \
	argo get -n $(NAMESPACE) $$WF -o json > runs/$${TSTAMP}-$${WF##*/}.json; \
	argo get -n $(NAMESPACE) $$WF --output wide | tee runs/$${TSTAMP}-$${WF##*/}.summary.txt; \
	echo "Workflow: $$WF"
| 83 | + |
# Submit via the Argo Server HTTP API (dev port-forward, no auth token).
# Runs in ONE shell with a `trap … EXIT` so the background port-forward is
# always killed — the previous pidfile cleanup lines never executed when a
# preceding recipe line failed (make aborts on the first error), leaving an
# orphaned kubectl port-forward behind.
submit-api: _ensure-dirs
	@kubectl -n $(NAMESPACE) port-forward svc/argo-server 2746:2746 >/dev/null 2>&1 & \
	PF_PID=$$!; \
	trap 'kill $$PF_PID 2>/dev/null || true' EXIT; \
	sleep 1; \
	curl -s -H 'Content-Type: application/json' \
		--data-binary @$(PARAMS) \
		http://localhost:2746/api/v1/workflows/$(NAMESPACE)/submit \
		| tee runs/submit-response.json | jq . >/dev/null || \
		{ echo "Non-JSON response (see runs/submit-response.json)"; exit 1; }
| 95 | + |
# Inspect: list workflows via the argo CLI, then show the raw Workflow CRs.
# Commands are chained with ';' so a failing `argo list` still lets the
# kubectl view run.
status:
	argo list -n $(NAMESPACE); echo; kubectl -n $(NAMESPACE) get wf
44 | 99 |
|
# Show the most recent workflow (argo's @latest alias) in wide format.
latest:
	argo get -n $(NAMESPACE) @latest --output wide
| 102 | + |
# Save the main-container logs of the most recent workflow under logs/.
# Resolves the name via argo's @latest alias instead of
# `argo list --output name | tail -1`, whose result depends on the list's
# sort order and could pick an old workflow.
logs-save: _ensure-dirs
	@WF=$$(argo get -n $(NAMESPACE) @latest -o json | jq -r '.metadata.name'); \
	TSTAMP=$$(date +%Y%m%d-%H%M%S); \
	argo logs -n $(NAMESPACE) $$WF -c main > logs/$${TSTAMP}-$$WF.log; \
	echo "Wrote logs/$${TSTAMP}-$$WF.log"
47 | 108 |
|
# Delete all Argo workflows in $(NAMESPACE), then force-remove any leftover
# completed workflow pods. `|| true` keeps the target green when there is
# nothing to delete.
clean:
	argo delete -n $(NAMESPACE) --all || true
	kubectl -n $(NAMESPACE) delete pod -l workflows.argoproj.io/completed=true --force --grace-period=0 || true
| 113 | + |
# Internal helper: create the local output directories used by the
# submit/logs/fetch targets (idempotent via mkdir -p).
_ensure-dirs:
	@mkdir -p runs logs
| 116 | + |
# Fetch from PVC: copy the tarball, unpack into runs/<WF>/, and pull any
# extra files sitting on /outputs.
#
# Rewritten as ONE shell invocation. The previous version used a heredoc
# (`cat <<'YAML'`) spread across recipe lines — but make runs every recipe
# line in its own shell, so the YAML body lines would have been executed as
# shell commands, and the later kubectl wait/cp lines referenced $$WF in
# fresh shells where it was unset. The manifest is now emitted with printf,
# which also lets $$WF/$$PVC substitute directly (no sed templating).
fetch-tar: _ensure-dirs
	@WF=$$(argo list -n $(NAMESPACE) --output name | tail -1 | sed 's#.*/##'); \
	PVC="$$WF-outpvc"; OUTDIR="runs/$$WF"; \
	echo "Workflow: $$WF"; echo "PVC: $$PVC"; mkdir -p $$OUTDIR; \
	kubectl -n $(NAMESPACE) delete pod fetch-$$WF --ignore-not-found >/dev/null 2>&1 || true; \
	printf '%s\n' \
		"apiVersion: v1" \
		"kind: Pod" \
		"metadata:" \
		"  name: fetch-$$WF" \
		"spec:" \
		"  restartPolicy: Never" \
		"  containers:" \
		"  - name: fetch" \
		"    image: busybox:1.36" \
		'    command: ["sh","-lc","sleep 600"]' \
		"    volumeMounts:" \
		"    - name: out" \
		"      mountPath: /mnt/out" \
		"  volumes:" \
		"  - name: out" \
		"    persistentVolumeClaim:" \
		"      claimName: $$PVC" \
		| kubectl -n $(NAMESPACE) apply -f -; \
	kubectl -n $(NAMESPACE) wait --for=condition=Ready pod/fetch-$$WF --timeout=60s; \
	kubectl -n $(NAMESPACE) cp fetch-$$WF:/mnt/out/geozarr.tar.gz $$OUTDIR/geozarr.tar.gz; \
	tar -xzf $$OUTDIR/geozarr.tar.gz -C $$OUTDIR; \
	kubectl -n $(NAMESPACE) cp fetch-$$WF:/mnt/out/. $$OUTDIR/ || true; \
	kubectl -n $(NAMESPACE) delete pod fetch-$$WF --wait=false; \
	echo "Unpacked into $$OUTDIR/"
| 150 | + |
# Convenience pipeline: build + load + install + template + submit + fetch.
# Sequential $(MAKE) invocations guarantee ordering even under `make -j`;
# plain prerequisites (`run: apply submit fetch-tar`) carry no ordering and
# could submit before the image/template are in place.
run:
	$(MAKE) apply
	$(MAKE) submit
	$(MAKE) fetch-tar
| 153 | + |
# Cleanup stray per-run PVCs (NOTE: this removes stored artifacts).
# Matches any PVC carrying the workflows.argoproj.io/workflow label;
# `|| true` keeps the target green when none exist.
clean-pvc:
	kubectl -n $(NAMESPACE) delete pvc -l workflows.argoproj.io/workflow 2>/dev/null || true
0 commit comments