# ===== Config =====
IMAGE ?= eopf-geozarr:dev
# Kubernetes namespace where Argo runs (kept on its own line: an inline
# comment would leave a trailing space in the value)
NAMESPACE ?= argo
TPL ?= geozarr-convert-template.yaml
PARAMS ?= params.json
CLUSTER ?= k3s-default
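
# Any of the above can be overridden per invocation, e.g. (tag and cluster
# name here are illustrative):
#   make build IMAGE=eopf-geozarr:test
#   make load-k3d CLUSTER=my-cluster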

# Runtime param overrides (env > PARAMS file)
STAC_URL ?=
OUTPUT_ZARR ?=
GROUPS ?=
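
# Env/CLI overrides win over $(PARAMS), e.g. (URL and path are placeholders):
#   make submit STAC_URL=https://stac.example/item.json OUTPUT_ZARR=/outputs/item.zarr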

# Abbrev: WF = Workflow name; PVC = PersistentVolumeClaim (<WF>-outpvc)

.PHONY: build load-k3d load-minikube argo-install template apply \
        submit submit-cli submit-api status latest logs-save clean \
        _ensure-dirs fetch-tar run clean-pvc

# Build the image locally
#   make build            -> WHEEL mode (small), builds linux/amd64
#   make build PORTABLE=1 -> PORTABLE mode (bigger), builds for native arch
build:
	@if [ "$(PORTABLE)" = "1" ]; then \
	  echo "==> Building PORTABLE image for native platform (allows source builds)"; \
	  docker build \
	    --build-arg PORTABLE_BUILD=1 \
	    -t $(IMAGE) . ; \
	else \
	  echo "==> Building WHEEL image for linux/amd64 (prebuilt wheels)"; \
	  docker buildx build --platform=linux/amd64 \
	    --build-arg PORTABLE_BUILD=0 \
	    -t $(IMAGE) --load . ; \
	fi
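
# A quick sanity check of what was produced (default tag shown; adjust if
# IMAGE was overridden):
#   docker image inspect eopf-geozarr:dev --format '{{.Os}}/{{.Architecture}}'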

# Load image into k3d’s containerd (dev clusters)
load-k3d:
	k3d image import $(IMAGE) --cluster $(CLUSTER) || \
	  (docker save $(IMAGE) | docker exec -i $$(docker ps --format '{{.Names}}' | grep $(CLUSTER)-server-0) ctr -n k8s.io images import -)
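
# Example (the cluster name must match an existing k3d cluster; list them
# with `k3d cluster list`):
#   make load-k3d CLUSTER=k3s-default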

# Build the image inside minikube’s Docker
load-minikube:
	eval "$$(minikube docker-env)"; docker build -t $(IMAGE) .
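
# The eval and the build share one shell line on purpose: make runs every
# recipe line in a fresh shell. To check the image landed inside minikube
# (command available in recent minikube releases):
#   minikube image ls | grep eopf-geozarr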

# Install Argo Workflows (v3.7.1) into $(NAMESPACE)
argo-install:
	kubectl create ns $(NAMESPACE) 2>/dev/null || true
	kubectl apply -n $(NAMESPACE) -f https://github.com/argoproj/argo-workflows/releases/download/v3.7.1/install.yaml
	kubectl -n $(NAMESPACE) rollout status deploy/workflow-controller
	kubectl -n $(NAMESPACE) rollout status deploy/argo-server
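
# Quick health check once both rollouts finish:
#   kubectl -n argo get pods
# The workflow-controller and argo-server pods should be Running.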

# Apply (or update) the WorkflowTemplate
template:
	kubectl -n $(NAMESPACE) apply -f $(TPL)
	kubectl -n $(NAMESPACE) get workflowtemplate geozarr-convert
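
# The applied template can also be inspected with the argo CLI:
#   argo template get geozarr-convert -n argo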

# Build + load + install + template (one shot)
apply: build load-k3d argo-install template

# Submit via CLI (uses env overrides, else PARAMS file)
submit: _ensure-dirs
	@STAC="$${STAC_URL:-$$(jq -r '.arguments.parameters[] | select(.name=="stac_url").value' $(PARAMS))}"; \
	OUT="$${OUTPUT_ZARR:-$$(jq -r '.arguments.parameters[] | select(.name=="output_zarr").value' $(PARAMS))}"; \
	GRP="$${GROUPS:-$$(jq -r '.arguments.parameters[] | select(.name=="groups").value' $(PARAMS))}"; \
	echo "Submitting:"; echo " stac_url=$$STAC"; echo " output_zarr=$$OUT"; echo " groups=$$GRP"; \
	WF=$$(argo submit -n $(NAMESPACE) --from workflowtemplate/geozarr-convert \
	  -p stac_url="$$STAC" -p output_zarr="$$OUT" -p groups="$$GRP" -o name); \
	TSTAMP=$$(date +%Y%m%d-%H%M%S); \
	argo get -n $(NAMESPACE) $$WF -o json > runs/$${TSTAMP}-$${WF##*/}.json; \
	argo get -n $(NAMESPACE) $$WF --output wide | tee runs/$${TSTAMP}-$${WF##*/}.summary.txt; \
	echo "Workflow: $$WF"
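
# Example submission with inline overrides (all values are placeholders):
#   make submit STAC_URL=https://stac.example/item.json \
#     OUTPUT_ZARR=/outputs/item.zarr GROUPS=/measurements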

# Submit via CLI (PARAMS file only, no env overrides)
submit-cli: _ensure-dirs
	@WF=$$(argo submit -n $(NAMESPACE) --from workflowtemplate/geozarr-convert \
	  -p stac_url="$$(jq -r '.arguments.parameters[] | select(.name=="stac_url").value' $(PARAMS))" \
	  -p output_zarr="$$(jq -r '.arguments.parameters[] | select(.name=="output_zarr").value' $(PARAMS))" \
	  -p groups="$$(jq -r '.arguments.parameters[] | select(.name=="groups").value' $(PARAMS))" \
	  -o name); \
	TSTAMP=$$(date +%Y%m%d-%H%M%S); \
	argo get -n $(NAMESPACE) $$WF -o json > runs/$${TSTAMP}-$${WF##*/}.json; \
	argo get -n $(NAMESPACE) $$WF --output wide | tee runs/$${TSTAMP}-$${WF##*/}.summary.txt; \
	echo "Workflow: $$WF"
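
# The jq queries above assume $(PARAMS) uses the workflow-arguments shape,
# roughly (parameter names from the template; values are placeholders):
#   {"arguments": {"parameters": [
#     {"name": "stac_url",    "value": "https://stac.example/item.json"},
#     {"name": "output_zarr", "value": "/outputs/item.zarr"},
#     {"name": "groups",      "value": "/measurements"}]}}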

# Submit via Argo Server HTTP (dev port-forward, no token)
submit-api: _ensure-dirs
	kubectl -n $(NAMESPACE) port-forward svc/argo-server 2746:2746 >/dev/null 2>&1 & echo $$! > .pf.pid
	sleep 1
	@curl -s -H 'Content-Type: application/json' \
	  --data-binary @$(PARAMS) \
	  http://localhost:2746/api/v1/workflows/$(NAMESPACE)/submit \
	  | tee runs/submit-response.json | jq . >/dev/null; RC=$$?; \
	kill $$(cat .pf.pid) 2>/dev/null || true; rm -f .pf.pid; \
	if [ "$$RC" -ne 0 ]; then echo "Non-JSON response (see runs/submit-response.json)"; exit 1; fi
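
# Caveat (an assumption; verify against your Argo release): the server's
# /submit endpoint generally expects a submit body along the lines of
#   {"resourceKind": "WorkflowTemplate", "resourceName": "geozarr-convert",
#    "submitOptions": {"parameters": ["stac_url=...", "output_zarr=..."]}}
# so a $(PARAMS) file in workflow-arguments form may need wrapping first.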

# Inspect
status:
	argo list -n $(NAMESPACE); echo; kubectl -n $(NAMESPACE) get wf

latest:
	argo get -n $(NAMESPACE) @latest --output wide

logs-save: _ensure-dirs
	@WF=$$(argo list -n $(NAMESPACE) --output name | tail -1); \
	TSTAMP=$$(date +%Y%m%d-%H%M%S); \
	argo logs -n $(NAMESPACE) $$WF -c main > logs/$${TSTAMP}-$${WF##*/}.log; \
	echo "Wrote logs/$${TSTAMP}-$${WF##*/}.log"
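
# To stream logs live instead of saving a snapshot (@latest resolves to the
# most recent workflow):
#   argo logs -n argo @latest -f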

# Delete all workflows + completed pods
clean:
	argo delete -n $(NAMESPACE) --all || true
	kubectl -n $(NAMESPACE) delete pod -l workflows.argoproj.io/completed=true --force --grace-period=0 || true
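
# Note: per-run PVCs (and the artifacts on them) survive `make clean`; remove
# them with `make clean-pvc` at the bottom of this file.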

_ensure-dirs:
	@mkdir -p runs logs

# Fetch from PVC: copy the main tarball, unpack into runs/<WF>/, then copy any
# other files from the output volume (e.g., dask-report.html).
# Pod manifest for the helper; {{WF}}/{{PVC}} are substituted at run time.
define FETCH_POD_YAML
apiVersion: v1
kind: Pod
metadata:
  name: fetch-{{WF}}
spec:
  restartPolicy: Never
  containers:
  - name: fetch
    image: busybox:1.36
    command: ["sh","-lc","sleep 600"]
    volumeMounts:
    - name: out
      mountPath: /mnt/out
  volumes:
  - name: out
    persistentVolumeClaim:
      claimName: {{PVC}}
endef
export FETCH_POD_YAML

# The whole recipe runs as one shell so $$WF/$$OUTDIR stay in scope, and the
# manifest comes from the exported variable above (a heredoc cannot span make
# recipe lines).
fetch-tar: _ensure-dirs
	@WF=$$(argo list -n $(NAMESPACE) --output name | tail -1 | sed 's#.*/##'); \
	PVC="$$WF-outpvc"; OUTDIR="runs/$$WF"; \
	echo "Workflow: $$WF"; echo "PVC: $$PVC"; mkdir -p $$OUTDIR; \
	kubectl -n $(NAMESPACE) delete pod fetch-$$WF --ignore-not-found >/dev/null 2>&1 || true; \
	echo "$$FETCH_POD_YAML" | sed -e "s/{{WF}}/$$WF/g" -e "s/{{PVC}}/$$PVC/g" | kubectl -n $(NAMESPACE) apply -f -; \
	kubectl -n $(NAMESPACE) wait --for=condition=Ready pod/fetch-$$WF --timeout=60s; \
	kubectl -n $(NAMESPACE) cp fetch-$$WF:/mnt/out/geozarr.tar.gz $$OUTDIR/geozarr.tar.gz; \
	tar -xzf $$OUTDIR/geozarr.tar.gz -C $$OUTDIR; \
	kubectl -n $(NAMESPACE) cp fetch-$$WF:/mnt/out/. $$OUTDIR/ || true; \
	kubectl -n $(NAMESPACE) delete pod fetch-$$WF --wait=false; \
	echo "Unpacked into $$OUTDIR/"
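
# After a successful fetch, runs/<WF>/ should hold (names indicative; exact
# contents come from whatever the workflow wrote to the output volume):
#   geozarr.tar.gz     the packed store copied from the PVC
#   <unpacked store>   produced by tar -xzf
#   dask-report.html   if the run generated one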

# Convenience: build + load + template + submit + fetch
run: apply submit fetch-tar
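
# End-to-end example (parameter values are placeholders):
#   make run STAC_URL=https://stac.example/item.json \
#     OUTPUT_ZARR=/outputs/item.zarr GROUPS=/measurements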

# Clean up stray per-run PVCs (removes stored artifacts)
clean-pvc:
	kubectl -n $(NAMESPACE) delete pvc -l workflows.argoproj.io/workflow 2>/dev/null || true