From 67a243b54ac01cd23a7c337488ef1f4b55401373 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Tue, 31 Oct 2023 15:03:51 -0500 Subject: [PATCH 01/38] init --- docs/deployment/basic.md | 6 +- docs/deployment/production.md | 151 +++++++------ docs/deployment/reference.md | 210 ++++++++++--------- docs/development/contributing.md | 49 +++-- docs/example_secrets.yaml | 79 +++---- helm/example_secrets.yaml | 1 - helm/servicex/templates/app/rmq_ingress.yaml | 32 +++ helm/servicex/values.yaml | 10 +- lite/.github/workflows/ci.yaml | 37 ++++ lite/Dockerfile | 27 +++ lite/README.md | 2 + lite/pyproject.toml | 16 ++ lite/requirements.txt | 2 + lite/resources/deployment.yaml | 0 lite/resources/start.sh | 5 + 15 files changed, 405 insertions(+), 222 deletions(-) create mode 100644 helm/servicex/templates/app/rmq_ingress.yaml create mode 100644 lite/.github/workflows/ci.yaml create mode 100644 lite/Dockerfile create mode 100644 lite/README.md create mode 100644 lite/pyproject.toml create mode 100644 lite/requirements.txt create mode 100644 lite/resources/deployment.yaml create mode 100644 lite/resources/start.sh diff --git a/docs/deployment/basic.md b/docs/deployment/basic.md index a6de0d283..5ed78f33c 100644 --- a/docs/deployment/basic.md +++ b/docs/deployment/basic.md @@ -57,8 +57,8 @@ servicex --namespace init --cert-dir ~/.globus ``` By default, this will look for the certificates in your `~/.globus` directory. -You can pass another directory with the `--cert-dir` argument. It is assumed -that they are named `usercert.pem` and `userkey.pem`. You will be prompted for +You can pass another directory with the `--cert-dir` argument. It is assumed +that they are named `usercert.pem` and `userkey.pem`. You will be prompted for the passphrase that secures your X509 private key. The installed secrets can be used by any ServiceX instance deployed into the @@ -141,7 +141,7 @@ helm install -f values.yaml --version v1.0.0-rc.3 servicex ssl-hep/servicex Initial deployment is typically rapid, with RabbitMQ requiring up to a minute to complete its initialization. The `servicex` argument is used by helm as the release -name. It is used to refer to the chart when deploying, insptacting, or deleting +name. It is used to refer to the chart when deploying, inspecting, or deleting the chart. After this all the pods of the new deployment should be ready. You can check the status of the pods via diff --git a/docs/deployment/production.md b/docs/deployment/production.md index 1592fb924..66ca0f1e8 100644 --- a/docs/deployment/production.md +++ b/docs/deployment/production.md @@ -1,29 +1,31 @@ # ServiceX in production -This guide is aimed at those interested in making production ServiceX +This guide is aimed at those interested in making production ServiceX deployments that are publicly accessible and require authentication. -For a guide to making a simple deployment of ServiceX with no extra features, +For a guide to making a simple deployment of ServiceX with no extra features, check out our [basic deployment guide](basic.md). ## Prerequisites -- A Kubernetes cluster running K8s version 1.16 or later + +- A Kubernetes cluster running K8s version 1.16 or later with an ingress controller such as NGINX. - [Helm 3](https://helm.sh/docs/intro/install/) installed. -- You've used the ServiceX CLI to install your grid certificates on +- You've used the ServiceX CLI to install your grid certificates on your cluster (if not, see the basic guide). 
-- An initial `values.yaml` file for making working ServiceX deployments, +- An initial `values.yaml` file for making working ServiceX deployments, such as the one in the basic guide. ## External access -It's easy to deploy a ServiceX instance with no external access, but this +It's easy to deploy a ServiceX instance with no external access, but this is of limited value. We will now update `values.yaml` to add external ingress. ### Adding an Ingress to the ServiceX app -Configure an Ingress resource for the ServiceX API server by adding the +Configure an Ingress resource for the ServiceX API server by adding the following section to your values file: + ```yaml app: ingress: @@ -32,34 +34,39 @@ app: host: ``` -The ServiceX API server will be located at a subdomain of the domain name -given in `app.ingress.host`. -The name of the subdomain will match the Helm release name +The ServiceX API server will be located at a subdomain of the domain name +given in `app.ingress.host`. +The name of the subdomain will match the Helm release name (the first position argument provided with the `helm install` command), so the full URL will be `.`. For example, if your values file contains: + ```yaml app: ingress: enabled: true host: servicex.ssl-hep.org ``` + and you deployed the helm chart with + ``` helm install -f values.yaml --version v1.0.0-rc.3 my-release ssl-hep/servicex ``` + then the instance's URL would be `my-release.servicex.ssl-hep.org`. -You should also make sure the host has a DNS A record pointing this +You should also make sure the host has a DNS A record pointing this subdomain at the external IP address of your ingress controller. -The `app.ingress.class` value is used to set the `kubernetes.io/ingress.class` -annotation on the Ingress resource. It defaults to `nginx`, but you can set a +The `app.ingress.class` value is used to set the `kubernetes.io/ingress.class` +annotation on the Ingress resource. It defaults to `nginx`, but you can set a different value, such as `traefik`. ### Adding an Ingress to Minio -ServiceX stores files in a Minio object store which is deployed as a + +ServiceX stores files in a Minio object store which is deployed as a subchart. The Helm chart for Minio has its own support for an Ingress, which we can activate like so: @@ -72,17 +79,19 @@ minio: hostname: my-release-minio.servicex.ssl-hep.org ``` -Unlike the ServiceX Ingress, the subchart doesn't know the name of our +Unlike the ServiceX Ingress, the subchart doesn't know the name of our deployment, so you need to hardcode it in the Minio Ingress host -(this is a current limitation of the Minio chart). +(this is a current limitation of the Minio chart). The value should be `-minio.`. ### Ingress at CERN k8s cluster -For ingress to work at CERN, one needs at least two loadbalancers allowed for your project. + +For ingress to work at CERN, one needs at least two loadbalancers allowed for your project. CERN documentation is [here](https://clouddocs.web.cern.ch/networking/load_balancing.html#kubernetes-service-type-loadbalancer). 
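
Before creating anything, you can check how many load balancers your project already uses (a quick sanity check; this assumes the OpenStack CLI with the load-balancer plugin is configured for your CERN project):

```
openstack loadbalancer list
```
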
Start by turning off the charts ingress: + ```yaml app: ingress: @@ -90,6 +99,7 @@ app: ``` Create loadbalancer service like this: + ```yaml apiVersion: v1 kind: ServiceX @@ -109,6 +119,7 @@ spec: ``` Verify that you can access it using just IP, then create a DNS for it: + ``` openstack loadbalancer set --description my-domain-name ServiceX-LB ping my-domain-name.cern.ch @@ -118,32 +129,35 @@ Once service is accessible from inside CERN, you may ask for the firewall to be The procedure should be repeated for MinIO. ## Configuring Ingress resources to use TLS + It's a good idea to enable TLS for both of these Ingress resources. There are two ways to do this: you can either obtain certificates and -install the TLS Secrets manually, or you can use the -[cert-manager](https://cert-manager.io/docs/) Kubernetes add-on to +install the TLS Secrets manually, or you can use the +[cert-manager](https://cert-manager.io/docs/) Kubernetes add-on to issue certificates and create the Secrets automatically. Separate guides for both options are provided below. Either way, the first step is to set `app.ingress.tls.enabled` to `true`. ### Without cert-manager -First, obtain a TLS certificate and private key for each Ingress + +First, obtain a TLS certificate and private key for each Ingress (two pairs in total). -This can be done using a trusted Certificate Authority (CA), such as +This can be done using a trusted Certificate Authority (CA), such as [Let's Encrypt](https://letsencrypt.org/). -Make sure that each certificate Common Name matches the hostname of the +Make sure that each certificate Common Name matches the hostname of the corresponding Ingress. Once you have your certs, you can install them to your cluster as [TLS Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets): `kubectl create secret tls --cert= --key=` -By default, the ServiceX chart looks for a Secret named -`-app-tls`. You can specify a different name in your values +By default, the ServiceX chart looks for a Secret named +`-app-tls`. You can specify a different name in your values using `app.ingress.tls.secretName`. Your final values should look something like: + ```yaml app: ingress: @@ -156,6 +170,7 @@ app: Adding TLS to the Minio subchart is slightly different. The configuration is as follows: + ```yaml minio: apiIngress: @@ -169,24 +184,27 @@ minio: - my-release-minio.servicex.ssl-hep.org secretName: my-release-minio-tls ``` -Remember to replace `my-release` and `servicex.ssl-hep.org` with your Helm release name and app ingress host, respectively. + +Remember to replace `my-release` and `servicex.ssl-hep.org` with your Helm release name and app ingress host, respectively. Here, you must specify a secret name; there is no default. ### With cert-manager + Alternately, you can let cert-manager handle the TLS certificates. To use it, complete the following steps: - [Install cert-manager](https://cert-manager.io/docs/installation/kubernetes/) on your cluster if it's not already installed. -- Deploy one or more ClusterIssuers, or check that one is already present. +- Deploy one or more ClusterIssuers, or check that one is already present. The Let's Encrypt staging and production ClusterIssuers are recommended. - In `values.yaml`, set `app.ingress.tls.clusterIssuer` to the name of the ClusterIssuer you'd like to use (e.g. `letsencrypt-prod`). 
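
To check whether a suitable ClusterIssuer is already present, you can list them before deploying (a sketch, assuming cert-manager and its CRDs are already installed on the cluster):

```
kubectl get clusterissuers
```
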
-Browsers will trust `letsencrypt-prod` automatically, but bear in mind that +Browsers will trust `letsencrypt-prod` automatically, but bear in mind that it's subject to [rate limits](https://letsencrypt.org/docs/rate-limits/), so it's best to use `letsencrypt-staging` for development. Your values should now look like: + ```yaml app: ingress: @@ -198,6 +216,7 @@ app: For more information, see the cert-manager [guide to securing nginx-ingress](https://cert-manager.io/docs/tutorials/acme/ingress). To enable TLS for Minio, use the following configuration: + ```yaml minio: apiIngress: @@ -213,31 +232,34 @@ minio: - my-release-minio.servicex.ssl-hep.org secretName: my-release-minio-tls ``` -Once again, remember to replace `my-release` and `servicex.ssl-hep.org` with + +Once again, remember to replace `my-release` and `servicex.ssl-hep.org` with your Helm release name and app ingress host, respectively. ## Securing the deployment with authentication + If you wish, you could deploy these values and have a ServiceX instance that is not secured but is reachable via the public URL. -This is okay for a sneak peek, but not recommended for long-lived deployments, +This is okay for a sneak peek, but not recommended for long-lived deployments, since your grid certs will be usable by anyone on the Internet. -To prevent this, ServiceX supports an authentication system which requires -new users to create accounts with your ServiceX deployment by authenticating -to Globus with the identity provider of their choice +To prevent this, ServiceX supports an authentication system which requires +new users to create accounts with your ServiceX deployment by authenticating +to Globus with the identity provider of their choice (such as CERN or their university). - ### Setting up Globus Auth + Globus Auth requires your deployment to be served over HTTPS, so make sure you have completed the TLS section above. -Visit [developers.globus.org](https://developers.globus.org) -and select ___Register your app with Globus___. -Create a project for ServiceX and within that project click on +Visit [developers.globus.org](https://developers.globus.org) +and select ___Register your app with Globus___. +Create a project for ServiceX and within that project click on ___Add new app___. The app name can be whatever you like. The scopes should include: + ``` openid email @@ -253,18 +275,21 @@ If you want to use port-forwarding, also include Save the record. Copy the Client ID and paste this into your `values.yaml`. + ```yaml app: globusClientID: ``` Generate a Client Secret and paste this value into `values.yaml` as well: + ```yaml app: globusClientSecret: ``` Finally, you can enable authentication in `values.yaml`: + ```yaml app: auth: true @@ -272,43 +297,46 @@ app: ``` The system works as follows: -- New users will be required to create accounts with their Globus logins. -- New accounts will be pending, and cannot make requests until approved. + +- New users will be required to create accounts with their Globus logins. +- New accounts will be pending, and cannot make requests until approved. - Accounts must be approved by a ServiceX admin. -- To bootstrap the initial admin account, you must set `app.adminEmail` +- To bootstrap the initial admin account, you must set `app.adminEmail` to the email address associated with the administrator's Globus account. 
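
Putting these settings together, the authentication portion of your values file might look like the following sketch (the Client ID, Client Secret, and admin email are placeholders for your own values):

```yaml
app:
  auth: true
  adminEmail: <administrator's Globus account email>        # placeholder
  globusClientID: <Globus application Client ID>            # placeholder
  globusClientSecret: <Globus application Client Secret>    # placeholder
```
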
### Approving new accounts from Slack -ServiceX can send notifications of new user registrations to the Slack channel -of your choice and allow administrators to approve pending users directly from -Slack. -This is strongly recommended for convenience, as currently the only other way -to approve accounts is to manually send HTTP requests to the API server via + +ServiceX can send notifications of new user registrations to the Slack channel +of your choice and allow administrators to approve pending users directly from +Slack. +This is strongly recommended for convenience, as currently the only other way +to approve accounts is to manually send HTTP requests to the API server via a tool like Postman or curl. -To set this up, complete the following steps **before deploying** ServiceX: +To set this up, complete the following steps __before deploying__ ServiceX: -- Create a secure Slack channel in your workspace (suggested name: `#servicex-signups`), accessible only to developers or administrators of ServiceX. -- Go to https://api.slack.com/apps and click **Create New App**. -Fill in ServiceX as the name and choose your workspace. -If you are going to make multiple ServiceX deployments, +- Create a secure Slack channel in your workspace (suggested name: `#servicex-signups`), accessible only to developers or administrators of ServiceX. +- Go to and click __Create New App__. +Fill in ServiceX as the name and choose your workspace. +If you are going to make multiple ServiceX deployments, you may want a more descriptive name, such as "ServiceX xAOD". -- Scroll down to the App Credentials section and find your Signing Secret. +- Scroll down to the App Credentials section and find your Signing Secret. Copy this value and place it in your values file as `app.slackSigningSecret`. - Scroll up to the feature list, click on Incoming Webhooks, and click the switch to turn them on. -- Click the **Add New Webhook to Workspace** button at the bottom, choose your signups channel, and click the **Allow** button. +- Click the __Add New Webhook to Workspace__ button at the bottom, choose your signups channel, and click the __Allow__ button. - Copy the Webhook URL and store it in your values under `app.newSignupWebhook`. - After completing the rest of the configuration, deploy ServiceX. - Go back to the [Slack App dashboard](https://api.slack.com/apps) and choose the app you created earlier. In the sidebar, click on Interactivity & Shortcuts under Features. -- Click the switch to turn Interactivity on. In the Request URL field, enter the base URL for the ServiceX API, followed by `/slack`, e.g. +- Click the switch to turn Interactivity on. In the Request URL field, enter the base URL for the ServiceX API, followed by `/slack`, e.g. `https://my-release.servicex.ssl-hep.org/slack`. Save your changes. - You're all set! ServiceX will now send interactive Slack notifications to your signups channel whenever a new user registers. ### Email Notifications -ServiceX can send email notifications to newly registered users via -[Mailgun](https://www.mailgun.com/) once their access has been approxed by an -administrator. To enable this, obtain a Mailgun API key and -[verified domain](https://documentation.mailgun.com/en/latest/quickstart-sending.html#verify-your-domain) + +ServiceX can send email notifications to newly registered users via +[Mailgun](https://www.mailgun.com/) once their access has been approxed by an +administrator. 
To enable this, obtain a Mailgun API key and +[verified domain](https://documentation.mailgun.com/en/latest/quickstart-sending.html#verify-your-domain) and set `app.mailgunApiKey` and `app.mailgunDomain` in your values file`. ## Scaling @@ -326,11 +354,14 @@ rabbitmq: cpu: 100m replicas: 3 ``` + ## Using SealedSecrets to Keep All Config In GitHub + We use Bitnami's Sealed Secrets Controller to allow us to check all of the -config into GitHub. +config into GitHub. Install sealed secrets helm chart + ```bash helm repo add sealed-secrets https://bitnami-labs.github.io/sealed-secrets helm install sealed-secrets --namespace kube-system sealed-secrets/sealed-secrets @@ -339,8 +370,9 @@ Install sealed secrets helm chart You will need the `kubeseal` command on your computer. Follow instructions for [the various options](https://github.com/bitnami-labs/sealed-secrets#homebrew) -Create a secrets file using the [example_secrets.yaml](../example_secrets.yaml). -Encrypt it using kubeseal with +Create a secrets file using the [example_secrets.yaml](../example_secrets.yaml). +Encrypt it using kubeseal with + ```console cat deployed_values/dev-secrets.yaml | kubeseal --controller-namespace kube-system --controller-name sealed-secrets --format yaml > deployed_values/dev-sealed-secrets.yaml ``` @@ -350,6 +382,7 @@ that file into the cluster, it will be unsealed and turned into a plaintext secr that can be mounted into the App's deployment as env vars. ## Autoscaling + ServiceX should automatically scale up/down number of transformers. For this to work it uses Horizontal Pod Autoscaler (HPA). For the HPA to work, k8s cluster needs to be able to measure CPU utilization of the pods. This is easiest enabled by installing [metric-server](https://github.com/kubernetes-sigs/metrics-server). The latest one is easily installed and supports up to 100 nodes by default: ```bash diff --git a/docs/deployment/reference.md b/docs/deployment/reference.md index 77670ea1e..63593dd97 100644 --- a/docs/deployment/reference.md +++ b/docs/deployment/reference.md @@ -6,109 +6,111 @@ The following table lists the configurable parameters of the ServiceX chart and their default values. Note that you may also wish to change some of the default parameters for the [rabbitMQ](https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq) or [minio](https://github.com/minio/charts) subcharts. -| Parameter | Description | Default | +| Parameter | Description | Default | |--------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------| -| `secrets` | Name of a secret deployed into the cluster. 
Must follow example_secrets.yaml | - | -| `logging.logstash.enabled` | Enable remote logging | true | -| `logging.logstash.host` | Host running logstash listening for log data | `servicex.atlas-ml.org` | -| `logging.logstash.port` | Port to send logging to | 5959 | -| `logging.logstash.protocol` | Protocol to be used (options are TCP and UDP) | TCP | -| `logging.logstash.monitor` | Link to be shown inside Monitor web page iframe | UC Kibana dashboard | -| `logging.logstash.logs` | Link to be shown inside Logs web page iframe | UC Kibana dashboard | -| `app.image` | ServiceX_App image name | `sslhep/servicex_app` | -| `app.tag` | ServiceX image tag | `latest` | -| `app.logLevel` | Logging level for ServiceX web app (uses standard unix levels) | `WARNING` | -| `app.pullPolicy` | ServiceX image pull policy | `Always` | -| `app.checksImage` | ServiceX init container image for checks | `ncsa/checks:latest` | -| `app.rabbitmq.retries` | Number of times to retry connecting to RabbitMQ on startup | 12 | -| `app.rabbitmq.retry_interval` | Number of seconds to wait between RabbitMQ retries on startup | 10 | -| `app.replicas` | Number of App pods to start. Experimental! | 1 | -| `app.auth` | Enable authentication or allow unfettered access (Python boolean string) | `false` | -| `app.globusClientID` | Globus application Client ID | - | -| `app.globusClientSecret` | Globus application Client Secret | - | -| `app.adminEmail` | Email address for initial admin user | admin@example.com | -| `app.tokenExpires` | Seconds until the ServiceX API tokens (JWT refresh tokens) expire | False (never) | -| `app.authExpires` | Seconds until the JWT access tokens expire | 21600 (six hours) | -| `app.ingress.enabled` | Enable install of ingress | false | -| `app.ingress.class` | Class to be set in `kubernetes.io/ingress.class` annotation | nginx | -| `app.ingress.host` | Hostname to associate ingress with | servicex.ssl-hep.org | -| `app.ingress.defaultBackend` | Name of a service to send requests to internal endpoints to | default-http-backend | -| `app.ingress.tls.enabled` | Enable TLS for ServiceX API Ingress resource | false | -| `app.ingress.tls.secretName` | Name of TLS Secret used for ServiceX API server | `{{.Release.Name}}-app-tls` | -| `app.ingress.tls.clusterIssuer` | Specify a ClusterIssuer if using cert-manager | - | -| `app.resources` | Pass in Kubernetes pod resource spec to deployment to change CPU and memory | { } | -| `app.slackSigningSecret` | Signing secret for Slack application | - | -| `app.newSignupWebhook` | Slack webhook URL for new signups | - | -| `app.mailgunApiKey` | API key to send Mailgun emails to newly approved users | - | -| `app.mailgunDomain` | Sender domain for emails (should be verified through Mailgun) | - | -| `app.defaultDIDFinderScheme` | DID Finder scheme if none provided in request. If left blank, template will attempt to guess. | - | -| `app.validateTransformerImage` | Should docker image name be validated at DockerHub? | `true` | - | `app.defaultUsers` | Name of secret holding json file with default users to create on deployment | - | -| `didFinder.rucio.enabled` | Should we deploy the Rucio DID Finder? | `true` | -| `didFinder.rucio.image` | Rucio DID Finder image name | `sslhep/servicex-did-finder` | -| `didFinder.rucio.tag` | Rucio DID Finder image tag | `latest` | -| `didFinder.rucio.pullPolicy` | Rucio DID Finder image pull policy | `Always` | -| `didFinder.rucio.servicex_latitude` | Latitude of the computing center where ServiceX runs. 
Will be used by Rucio to return the closest input data replica. | 41.78 | -| `didFinder.rucio.servicex_longitude` | Longitude of the computing center where ServiceX runs. Will be used by Rucio to return the closest input data replica. | -87.7 | -| `didFinder.rucio.reportLogicalFiles` | For CMS xCache sites, we don't want the replicas, only logical names. Set to true to get this behavior | false | -| `didFinder.rucio.rucio_host` | URL for Rucio service to use | `https://voatlasrucio-server-prod.cern.ch:443` | -| `didFinder.rucio.auth _host` | URL to obtain Rucio authentication | `https://voatlasrucio-auth-prod.cern.ch:443` | -| `didFinder.rucio.memcache.enabled` | Should use memcache to store results returned by the DID lookup? | true | -| `didFinder.rucio.memcache.image` | Docker image for memcache | memcached | -| `didFinder.rucio.memcache.tag` | Tag of the memcache image | alpine | -| `didFinder.rucio.memcache.ttl` | How long should memcache results be considered valid (in seconds) | 86400 | -| `didFinder.CERNOpenData.enabled` | Should we deploy the CERN OpenData DID Finder? `true` | -| `didFinder.CERNOpenData.image` | CERN OpenData DID Finder image name | `sslhep/servicex-did-finder` | -| `didFinder.CERNOpenData.tag` | CERN OpenData DID Finder image tag | `latest` | -| `didFinder.CERNOpenData.pullPolicy` | CERN OpenData DID Finder image pull policy | `Always` | -| `codegen.atlasxaod.enabled` | Deploy the ATLAS xAOD Code generator? | true | -| `codegen.atlasxaod.image` | Code generator image | `sslhep/servicex_code_gen_func_adl_xaod` | -| `codegen.atlasxaod.pullPolicy` | | true | -| `codegen.atlasxaod.tag` | Code generator image tag | develop | -| `codegen.atlasxaod.defaultScienceContainerImage` | The transformer image that should be run against this generated code | `sslhep/servicex_func_adl_xaod_transformer` | -| `codegen.atlasxaod.defaultScienceContainerTag` | Tag for the transformer image that should be run against this generated code | develop | -|`codegen.uproot.enabled` | Deploy the uproot code generator? - also all of the code gen settings, above are available | true | -|`codegen.cms.enabled` | Deploy the CMS AOD code generator? - also all of the code gen settings, above are available | true | -|`codegen.python.enabled` | Deploy the python uproot code generator? - also all of the code gen settings, above are available | true | -| `x509Secrets.image` | X509 Secret Service image name | `sslhep/x509-secrets` | -| `x509Secrets.tag` | X509 Secret Service image tag | `latest` | -| `x509Secrets.pullPolicy` | X509 Secret Service image pull policy | `Always` | -| `x509Secrets.vomsOrg` | Which VOMS org to contact for proxy? | `atlas` | -| `x509Secrets.initImage` | X509 Secret Service init container image | `alpine:3.6` | -| `rbacEnabled` | Specify if rbac is enabled in your cluster | `true` | -| `hostMount` | Optional path to mount in transformers as /data | - | -| `gridAccount` | CERN User account name to access Rucio | - | -| `noCerts` | Set to true to disable x509 certs and only use open data | false | -| `rabbitmq.password` | Override the generated RabbitMQ password | leftfoot1 | -| `objectstore.enabled` | Deploy a minio object store with Servicex? | true | -| `objectstore.internal` | Deploy a captive minio instance with this chart? | true | -| `objectstore.publicURL` | What URL should the client use to download files? If set, this is given whether ingress is enabled or not | nil | -| `postgres.enabled` | Deploy a postgres database into cluster? 
If not, we use a sqllite db | false | -| `minio.auth.rootUser` | Username to log into minio | miniouser | -| `minio.auth.rootPassword` | Password key to log into minio | leftfoot1 | -| `minio.apiIngress.enabled` | Should minio chart deploy an ingress to the service? | false | -| `minio.apiIngress.hostname` | Hostname associate with ingress controller | nil | -| `transformer.cachePrefix` | Prefix string to stick in front of file paths. Useful for XCache | | -| `transformer.autoscaler.enabled` | Enable/disable horizontal pod autoscaler for transformers | True | -| `transformer.autoscaler.cpuScaleThreshold` | CPU percentage threshold for pod scaling | 30 | -| `transformer.autoscaler.minReplicas` | Minimum number of transformer pods per request | 1 | -| `transformer.autoscaler.maxReplicas` | Maximum number of transformer pods per request | 20 | -| `transformer.pullPolicy` | Pull policy for transformer pods (Image name specified in REST Request) | Always | -| `transformer.priorityClassName` | priorityClassName for transformer pods (Not setting it means getting global default) | Not Set | -| `transformer.cpuLimit` | Set CPU resource limit for pod in number of cores | 1 | -| `transformer.sidecarImage` | Image name for the transformer sidecar container that hold the serviceX code | 'sslhep/servicex_sidecar_transformer' | -| `transformer.sidecarTag` | Tag for the sidecar container | 'develop' | -| `transformer.sidecarPullPolicy` | Pull Policy for the sidecar container | 'Always' | -| `transformer.persistence.existingClaim` | Existing persistent volume claim | nil | -| `transformer.subdir` | Subdirectory of the mount to write transformer results to (should end with trailing /) | nil | -| `minioCleanup.enabled` | Enable deployment of minio cleanup service | false | -| `minioCleanup.image` | Default image for minioCleanup cronjob | `sslhep/servicex_minio_cleanup` | -| `minioCleanup.tag` | minioCleanup image tag | | -| `minioCleanup.pullPolicy` | minioCleanup image pull policy | `Always` | -| `minioCleanup.threads` | Number of threads to use when processing S3 Storage | 6 | -| `minioCleanup.logLevel` | Log level to use for logging (e.g. DEBUG, INFO, WARN, ERROR, FATAL) | INFO | +| `secrets` | Name of a secret deployed into the cluster. Must follow example_secrets.yaml | - | +| `logging.logstash.enabled` | Enable remote logging | true | +| `logging.logstash.host` | Host running logstash listening for log data| `servicex.atlas-ml.org` | +| `logging.logstash.port` | Port to send logging to| 5959 | +| `logging.logstash.protocol` | Protocol to be used (options are TCP and UDP) | TCP | +| `logging.logstash.monitor` | Link to be shown inside Monitor web page iframe | UC Kibana dashboard | +| `logging.logstash.logs` | Link to be shown inside Logs web page iframe| UC Kibana dashboard | +| `app.image` | ServiceX_App image name| `sslhep/servicex_app` | +| `app.tag` | ServiceX image tag | `latest` | +| `app.logLevel` | Logging level for ServiceX web app (uses standard unix levels) | `WARNING` | +| `app.pullPolicy` | ServiceX image pull policy | `Always` | +| `app.checksImage` | ServiceX init container image for checks | `ncsa/checks:latest` | +| `app.rabbitmq.retries`| Number of times to retry connecting to RabbitMQ on startup | 12 | +| `app.rabbitmq.retry_interval` | Number of seconds to wait between RabbitMQ retries on startup | 10 | +| `app.replicas` | Number of App pods to start. Experimental! 
| 1 | +| `app.auth` | Enable authentication or allow unfettered access (Python boolean string) | `false` | +| `app.globusClientID` | Globus application Client ID | - | +| `app.globusClientSecret` | Globus application Client Secret | - | +| `app.adminEmail` | Email address for initial admin user | | +| `app.tokenExpires` | Seconds until the ServiceX API tokens (JWT refresh tokens) expire| False (never) | +| `app.authExpires` | Seconds until the JWT access tokens expire | 21600 (six hours) | +| `app.ingress.enabled` | Enable install of ingress | false| +| `app.ingress.class` | Class to be set in `kubernetes.io/ingress.class` annotation | nginx| +| `app.ingress.host` | Hostname to associate ingress with | servicex.ssl-hep.org | +| `app.ingress.defaultBackend` | Name of a service to send requests to internal endpoints to | default-http-backend | +| `app.ingress.tls.enabled` | Enable TLS for ServiceX API Ingress resource| false| +| `app.ingress.tls.secretName` | Name of TLS Secret used for ServiceX API server | `{{.Release.Name}}-app-tls` | +| `app.ingress.tls.clusterIssuer` | Specify a ClusterIssuer if using cert-manager | - | +| `app.resources` | Pass in Kubernetes pod resource spec to deployment to change CPU and memory | { } | +| `app.slackSigningSecret` | Signing secret for Slack application | - | +| `app.newSignupWebhook`| Slack webhook URL for new signups | - | +| `app.mailgunApiKey` | API key to send Mailgun emails to newly approved users | - | +| `app.mailgunDomain` | Sender domain for emails (should be verified through Mailgun) | - | +| `app.defaultDIDFinderScheme` | DID Finder scheme if none provided in request. If left blank, template will attempt to guess. | - | +| `app.validateTransformerImage` | Should docker image name be validated at DockerHub? | `true` | + | `app.defaultUsers` | Name of secret holding json file with default users to create on deployment | - | +| `didFinder.rucio.enabled` | Should we deploy the Rucio DID Finder? | `true` | +| `didFinder.rucio.image` | Rucio DID Finder image name | `sslhep/servicex-did-finder` | +| `didFinder.rucio.tag` | Rucio DID Finder image tag | `latest` | +| `didFinder.rucio.pullPolicy` | Rucio DID Finder image pull policy | `Always` | +| `didFinder.rucio.servicex_latitude` | Latitude of the computing center where ServiceX runs. Will be used by Rucio to return the closest input data replica. | 41.78| +| `didFinder.rucio.servicex_longitude` | Longitude of the computing center where ServiceX runs. Will be used by Rucio to return the closest input data replica. | -87.7| +| `didFinder.rucio.reportLogicalFiles` | For CMS xCache sites, we don't want the replicas, only logical names. Set to true to get this behavior | false| +| `didFinder.rucio.rucio_host` | URL for Rucio service to use | `https://voatlasrucio-server-prod.cern.ch:443` | +| `didFinder.rucio.auth _host` | URL to obtain Rucio authentication | `https://voatlasrucio-auth-prod.cern.ch:443` | +| `didFinder.rucio.memcache.enabled` | Should use memcache to store results returned by the DID lookup? | true | +| `didFinder.rucio.memcache.image` | Docker image for memcache | memcached | +| `didFinder.rucio.memcache.tag` | Tag of the memcache image | alpine | +| `didFinder.rucio.memcache.ttl` | How long should memcache results be considered valid (in seconds)| 86400| +| `didFinder.CERNOpenData.enabled` | Should we deploy the CERN OpenData DID Finder? 
| `true` | +| `didFinder.CERNOpenData.image` | CERN OpenData DID Finder image name | `sslhep/servicex-did-finder` | +| `didFinder.CERNOpenData.tag` | CERN OpenData DID Finder image tag | `latest` | +| `didFinder.CERNOpenData.pullPolicy` | CERN OpenData DID Finder image pull policy | `Always` | +| `codegen.atlasxaod.enabled` | Deploy the ATLAS xAOD Code generator? | true | +| `codegen.atlasxaod.image` | Code generator image | `sslhep/servicex_code_gen_func_adl_xaod` | +| `codegen.atlasxaod.pullPolicy` | | true | +| `codegen.atlasxaod.tag` | Code generator image tag | develop | +| `codegen.atlasxaod.defaultScienceContainerImage` | The transformer image that should be run against this generated code | `sslhep/servicex_func_adl_xaod_transformer` | +| `codegen.atlasxaod.defaultScienceContainerTag` | Tag for the transformer image that should be run against this generated code | develop | +|`codegen.uproot.enabled` | Deploy the uproot code generator? - also all of the code gen settings, above are available | true | +|`codegen.cms.enabled` | Deploy the CMS AOD code generator? - also all of the code gen settings, above are available | true | +|`codegen.python.enabled` | Deploy the python uproot code generator? - also all of the code gen settings, above are available | true | +| `x509Secrets.image` | X509 Secret Service image name | `sslhep/x509-secrets` | +| `x509Secrets.tag` | X509 Secret Service image tag | `latest` | +| `x509Secrets.pullPolicy` | X509 Secret Service image pull policy | `Always` | +| `x509Secrets.vomsOrg` | Which VOMS org to contact for proxy? | `atlas` | +| `x509Secrets.initImage` | X509 Secret Service init container image | `alpine:3.6` | +| `rbacEnabled` | Specify if rbac is enabled in your cluster | `true` | +| `hostMount` | Optional path to mount in transformers as /data | - | +| `gridAccount` | CERN User account name to access Rucio | - | +| `noCerts` | Set to true to disable x509 certs and only use open data | false| +| `rabbitmq.auth.password` | Override the generated RabbitMQ password | leftfoot1 | +| `rabbitmq.apiIngress.enabled` | Should RMQ chart deploy an ingress to the service? | false | +| `rabbitmq.apiIngress.host` | RMQ ingress hostname | servicex-rmq.ssl-hep.org | +| `rabbitmq.apiIngress.class` | RMQ ingress class | nginx | +| `objectstore.enabled` | Deploy a minio object store with Servicex? | true | +| `objectstore.internal`| Deploy a captive minio instance with this chart? | true | +| `objectstore.publicURL` | What URL should the client use to download files? If set, this is given whether ingress is enabled or not | nil | +| `postgres.enabled` | Deploy a postgres database into cluster? If not, we use a sqllite db | false| +| `minio.auth.rootUser` | Username to log into minio | miniouser | +| `minio.auth.rootPassword` | Password key to log into minio | leftfoot1 | +| `minio.apiIngress.enabled` | Should minio chart deploy an ingress to the service? | false| +| `minio.apiIngress.hostname` | Hostname associate with ingress controller | nil | +| `transformer.cachePrefix` | Prefix string to stick in front of file paths. 
Useful for XCache | | +| `transformer.autoscaler.enabled` | Enable/disable horizontal pod autoscaler for transformers | True | +| `transformer.autoscaler.cpuScaleThreshold` | CPU percentage threshold for pod scaling | 30 | +| `transformer.autoscaler.minReplicas` | Minimum number of transformer pods per request | 1 | +| `transformer.autoscaler.maxReplicas` | Maximum number of transformer pods per request | 20 | +| `transformer.pullPolicy` | Pull policy for transformer pods (Image name specified in REST Request) | Always | +| `transformer.priorityClassName` | priorityClassName for transformer pods (Not setting it means getting global default) | Not Set | +| `transformer.cpuLimit`| Set CPU resource limit for pod in number of cores | 1 | +| `transformer.sidecarImage` | Image name for the transformer sidecar container that hold the serviceX code | 'sslhep/servicex_sidecar_transformer' | +| `transformer.sidecarTag` | Tag for the sidecar container | 'develop' | +| `transformer.sidecarPullPolicy` | Pull Policy for the sidecar container | 'Always' | +| `transformer.persistence.existingClaim` | Existing persistent volume claim | nil | +| `transformer.subdir` | Subdirectory of the mount to write transformer results to (should end with trailing /) | nil | +| `minioCleanup.enabled`| Enable deployment of minio cleanup service | false| +| `minioCleanup.image` | Default image for minioCleanup cronjob | `sslhep/servicex_minio_cleanup` | +| `minioCleanup.tag` | minioCleanup image tag | | +| `minioCleanup.pullPolicy` | minioCleanup image pull policy | `Always` | +| `minioCleanup.threads`| Number of threads to use when processing S3 Storage | 6 | +| `minioCleanup.logLevel` | Log level to use for logging (e.g. DEBUG, INFO, WARN, ERROR, FATAL) | INFO | | `minioCleanup.schedule` | Schedule for minioCleanup cronjob. See [reference](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax) for details on fields | `* */8 * * *` (every 8 hours) | -| `minioCleanup.maxAge` | Max age in days before removing results | 30 | -| `minioCleanup.maxSize` | Start removing buckets when total space used reaches this number (can use G,M, T suffixes) | '1G' | -| `minioCleanup.normSize` | Size at which to stop removing buckets | '700M' | | ->>>>>>> origin +| `minioCleanup.maxAge` | Max age in days before removing results | 30 | +| `minioCleanup.maxSize`| Start removing buckets when total space used reaches this number (can use G,M, T suffixes) | '1G' | +| `minioCleanup.normSize` | Size at which to stop removing buckets | '700M' | | diff --git a/docs/development/contributing.md b/docs/development/contributing.md index e059d5513..6220be61f 100644 --- a/docs/development/contributing.md +++ b/docs/development/contributing.md @@ -4,10 +4,10 @@ Welcome to the ServiceX contributor guide, and thank you for your interest in co ## Overview -ServiceX uses a microservice architecture, -and is designed to be hosted on a Kubernetes cluster. -The ServiceX project uses a polyrepo strategy for source code management: -the source code for each microservice is located in a dedicated repo. +ServiceX uses a microservice architecture, +and is designed to be hosted on a Kubernetes cluster. +The ServiceX project uses a polyrepo strategy for source code management: +the source code for each microservice is located in a dedicated repo. 
Below is a partial list of these repositories: @@ -21,80 +21,97 @@ Please read our [architecture document](https://servicex.readthedocs.io/en/lates ## Branching Strategy -ServiceX uses a slightly modified GitLab flow. Each repository has a main branch, usually named `develop` (or `master` for the Python frontend). All changes should be made on feature branches and submitted as PRs to the main branch. Releases are frozen on dedicated release branches, e.g. `v1.0.0-RC.2`. +ServiceX uses a slightly modified GitLab flow. Each repository has a main branch, usually named `develop` (or `master` for the Python frontend). All changes should be made on feature branches and submitted as PRs to the main branch. Releases are frozen on dedicated release branches, e.g. `v1.0.0-RC.2`. ## Development Workflow 1. Set up a local development environment: - - Decide which microservice (or Helm chart) you'd like to change, - and locate the corresponding repository. - - If you are a not a member of the `ssl-hep` GitHub organization, + - Decide which microservice (or Helm chart) you'd like to change, + and locate the corresponding repository. + - If you are a not a member of the `ssl-hep` GitHub organization, fork the repository. - Clone the (forked) repository to your local machine: + ``` git clone git@github.com:/ServiceX_App.git ``` + - If you created a fork, add the upstream repository as remote: + ``` git remote add upstream git@github.com:ssl-hep/ServiceX_App.git ``` + - Set up a new environment via ``conda`` or ``virtualenv``. - Install dependencies, including test dependencies: + ``` python3 -m pip install -e .[test] ``` + - If the root directory contains a file named `.pre-commit-config.yaml`, you can install the [pre-commit](https://pre-commit.com/) hooks with: + ``` pip install pre-commit pre-commit install ``` + 1. Develop your contribution: - Pull latest changes from upstream: + ``` git checkout develop git pull upstream develop ``` + - Create a branch for the feature you want to work on: + ``` git checkout -b fix-issue-99 ``` + - Commit locally as you progress with `git add` and `git commit`. 1. Test your changes: - Run the full test suite with `python -m pytest`, or target specific test files with `python -m pytest tests/path/to/file.py`. - Please write new unit tests to cover any changes you make. - You can also manually test microservice changes against a full ServiceX deployment by building the Docker image, pushing it to DockerHub, and setting the `image` and `tag` values as follows: + ```yaml app: image: / tag: my-feature-branch - ``` - - For more details, please read our full - [deployment guide](https://servicex.readthedocs.io/en/latest/deployment/basic). + ``` + + - For more details, please read our full + [deployment guide](https://servicex.readthedocs.io/en/latest/deployment/basic). 1. Submit a pull request to the upstream repository. ## Issues + Please submit issues for bugs and feature requests to the [main ServiceX repository](https://github.com/ssl-hep/ServiceX), unless the issue is specific to a single microservice. We manage project priorities with a [ZenHub board](https://app.zenhub.com/workspaces/servicex-5caba4288d0ceb76ea94ae1f/board?repos=180217333,180236972,185614791,182823774,202592339). ## Join us on Slack + We coordinate our efforts on the [IRIS-HEP Slack](http://iris-hep.slack.com). Come join this intellectual hub! ## Debugging Tips -Microservice architectures can be difficult to test and debug. 
Here are some + +Microservice architectures can be difficult to test and debug. Here are some helpful hints to make this easier. 1. Instead of relying on the DID Finder to locate some particular datafile, you -can mount one of your local directories into the transformer pod and then +can mount one of your local directories into the transformer pod and then instruct the DID Finder to always offer up the path to that file regardless of the submitted DID. You can use the `hostMount` value to have a local directory -mounted into each transformer pod under `/data`. You can use the +mounted into each transformer pod under `/data`. You can use the `didFinder.staticFile` value to instruct DID Finder to offer up a file from that directory. -2. You can use port-forwarding to expose port 15672 from the RabbitMQ pod to +2. You can use port-forwarding to expose port 15672 from the RabbitMQ pod to your laptop and log into the Rabbit admin console using the username: `user` and password `leftfoot1`. From here you can monitor the queues, purge old messages and inject your own messages @@ -103,4 +120,4 @@ and inject your own messages ### Hotfixes -If a critical bugfix or hotfix must be applied to a previous release, it should be merged to the main branch and then applied to each affected release branch using `git cherry-pick -m 1`. Merge commits have 2 parents, so the `-m 1` flag is used to specify that the first parent (i.e. previous commit on the main branch) should be used +If a critical bugfix or hotfix must be applied to a previous release, it should be merged to the main branch and then applied to each affected release branch using `git cherry-pick -m 1`. Merge commits have 2 parents, so the `-m 1` flag is used to specify that the first parent (i.e. previous commit on the main branch) should be used diff --git a/docs/example_secrets.yaml b/docs/example_secrets.yaml index fade39e8f..d37855d3e 100644 --- a/docs/example_secrets.yaml +++ b/docs/example_secrets.yaml @@ -1,43 +1,46 @@ { "kind": "SealedSecret", "apiVersion": "bitnami.com/v1alpha1", - "metadata": { - "name": "servicex-secrets", - "namespace": "servicex-namespace", - "creationTimestamp": null, - "annotations": { - "sealedsecrets.bitnami.com/namespace-wide": "true" - } - }, - "spec": { - "template": { - "metadata": { - "name": "servicex-secrets", - "namespace": "servicex-namespace", - "creationTimestamp": null, - "annotations": { - "sealedsecrets.bitnami.com/managed": "true", - "sealedsecrets.bitnami.com/namespace-wide": "true" - } - }, - "type": "Opaque", - "data": null + "metadata": + { + "name": "servicex-secrets", + "namespace": "servicex-namespace", + "creationTimestamp": null, + "annotations": { "sealedsecrets.bitnami.com/namespace-wide": "true" }, + }, + "spec": + { + "template": + { + "metadata": + { + "name": "servicex-secrets", + "namespace": "servicex-namespace", + "creationTimestamp": null, + "annotations": + { + "sealedsecrets.bitnami.com/managed": "true", + "sealedsecrets.bitnami.com/namespace-wide": "true", + }, + }, + "type": "Opaque", + "data": null, + }, + "encryptedData": + { + "accesskey": "aaa", + "flaskSecretKey": "aaa", + "globusClientID": "aaa", + "globusClientSecret": "aaa", + "jwtSecretKey": "aaa", + "mailgunAPIKey": "aaa", + "postgresql-password": "aaa", + "rabbitmq-password": "aaa", + "root-password": "aaa", + "root-user": "aaa", + "secretkey": "aaa", + "slackSigningSecret": "aaa", + "slackSignupWebhook": "aaa", + }, }, - "encryptedData": { - "accesskey": "aaa", - "flaskSecretKey": "aaa", - "globusClientID": "aaa", 
- "globusClientSecret": "aaa", - "jwtSecretKey": "aaa", - "mailgunAPIKey": "aaa", - "postgresql-password": "aaa", - "rabbitmq-erlang-cookie": "aaa", - "rabbitmq-password": "aaa", - "root-password": "aaa", - "root-user": "aaa", - "secretkey": "aaa", - "slackSigningSecret": "aaa", - "slackSignupWebhook": "aaa" - } - } } diff --git a/helm/example_secrets.yaml b/helm/example_secrets.yaml index 7d98b463e..047b68f9c 100644 --- a/helm/example_secrets.yaml +++ b/helm/example_secrets.yaml @@ -16,5 +16,4 @@ data: accesskey: <> secretkey: <> rabbitmq-password: << rabbitMQ password >> - rabbitmq-erlang-cookie: << rabbitMQ erlang cookie >> postgresql-password: << postgresql password for postgres user >> diff --git a/helm/servicex/templates/app/rmq_ingress.yaml b/helm/servicex/templates/app/rmq_ingress.yaml new file mode 100644 index 000000000..a98b5077a --- /dev/null +++ b/helm/servicex/templates/app/rmq_ingress.yaml @@ -0,0 +1,32 @@ +{{- if .Values.rabbitmq.apiIngress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: {{ .Values.rabbitmq.apiIngress.class }} + {{- if .Values.rabbitmq.apiIngress.tls.clusterIssuer }} + cert-manager.io/cluster-issuer: {{ .Values.rabbitmq.apiIngress.tls.clusterIssuer }} + acme.cert-manager.io/http01-edit-in-place: "true" + {{- end }} + labels: + app: {{ .Release.Name }}-rmq-servicex + name: {{ .Release.Name }}-rmq-servicex +spec: + {{- if .Values.rabbitmq.apiIngress.tls.enabled }} + tls: + - hosts: + - {{ .Release.Name }}.{{ .Values.rabbitmq.apiIngress.host }} + secretName: {{ tpl .Values.rabbitmq.apiIngress.tls.secretName . }} + {{- end }} + rules: + - host: {{ .Release.Name }}.{{ .Values.rabbitmq.apiIngress.host }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ .Release.Name }}-rabbitmq + port: + number: 5672 +{{- end }} diff --git a/helm/servicex/values.yaml b/helm/servicex/values.yaml index fe5ee0013..26a9e60e5 100644 --- a/helm/servicex/values.yaml +++ b/helm/servicex/values.yaml @@ -120,6 +120,14 @@ postgresql: persistence: enabled: false rabbitmq: + apiIngress: + enabled: false + class: nginx + host: rmq.ssl-hep.org + tls: + enabled: false + clusterIssuer: null + secretName: "{{.Release.Name}}-rmq-tls" auth: password: leftfoot1 persistence: @@ -144,7 +152,7 @@ transformer: scienceContainerPullPolicy: Always language: python - exec: # replace me + exec: # replace me outputDir: /servicex/output persistence: diff --git a/lite/.github/workflows/ci.yaml b/lite/.github/workflows/ci.yaml new file mode 100644 index 000000000..b85c2441a --- /dev/null +++ b/lite/.github/workflows/ci.yaml @@ -0,0 +1,37 @@ +name: CI/CD + +on: + push: + branches: + - "*" + tags: + - "*" + pull_request: + +jobs: + test: + strategy: + matrix: + python-version: ["3.10"] + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4.1.1 + publish: + runs-on: ubuntu-latest + needs: test + steps: + - uses: actions/checkout@v4.1.1 + + - name: Extract tag name + shell: bash + run: echo "##[set-output name=imagetag;]$(echo ${GITHUB_REF##*/})" + id: extract_tag_name + + - name: Build Docker Image + uses: elgohr/Publish-Docker-Github-Action@v5 + with: + name: sslhep/servicex_lite:${{ steps.extract_tag_name.outputs.imagetag }} + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + # tag: "${GITHUB_REF##*/}" diff --git a/lite/Dockerfile b/lite/Dockerfile new file mode 100644 index 000000000..1ac6a2e1a --- /dev/null +++ b/lite/Dockerfile @@ -0,0 +1,27 @@ +FROM python:3.11 AS 
builder + +RUN useradd -ms /bin/bash cleanup + +COPY pyproject.toml poetry.lock /home/cleanup/ +WORKDIR /home/cleanup + +FROM builder as poetry +ENV POETRY_HOME=/home/cleanup +ENV POETRY_VIRTUALENVS_IN_PROJECT=true +ENV PATH="$POETRY_HOME/bin:$PATH" +RUN python -c 'from urllib.request import urlopen; print(urlopen("https://install.python-poetry.org").read().decode())' | python - +COPY resources ./ +RUN poetry install --no-interaction --no-ansi -vvv + +FROM builder AS runtime + +COPY --from=poetry /home/cleanup /home/cleanup +WORKDIR /home/cleanup +RUN mkdir ./cleanup +COPY scripts/*.py resources/start.sh ./ + +RUN chmod +x start.sh + +USER cleanup + +ENTRYPOINT ["./start.sh"] diff --git a/lite/README.md b/lite/README.md new file mode 100644 index 000000000..3ee91ef65 --- /dev/null +++ b/lite/README.md @@ -0,0 +1,2 @@ +# storage_cleanup +Microservice to cleanup storage used by ServiceX diff --git a/lite/pyproject.toml b/lite/pyproject.toml new file mode 100644 index 000000000..669163861 --- /dev/null +++ b/lite/pyproject.toml @@ -0,0 +1,16 @@ +[tool.poetry] +name = "serivcex_light" +version = "0.1.0" +description = "" +authors = ["Ilija Vukotic "] + +[tool.poetry.dependencies] +python = "^3.11" +kubernetes = "^25.3.0" +python-logstash = "^0.4.8" + +[tool.poetry.dev-dependencies] + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/lite/requirements.txt b/lite/requirements.txt new file mode 100644 index 000000000..9e347f602 --- /dev/null +++ b/lite/requirements.txt @@ -0,0 +1,2 @@ +minio +servicex_storage \ No newline at end of file diff --git a/lite/resources/deployment.yaml b/lite/resources/deployment.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/lite/resources/start.sh b/lite/resources/start.sh new file mode 100644 index 000000000..e016de373 --- /dev/null +++ b/lite/resources/start.sh @@ -0,0 +1,5 @@ +#!/bin/sh +PATH=.venv/bin:$PATH +. .venv/bin/activate +env +python3.11 ./minio_cleanup.py --max-size $MAX_SIZE --norm-size $NORM_SIZE --max-age $MAX_AGE \ No newline at end of file From 770b82d8f8e899a83519b8d1f561aeda6a02fd8b Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 1 Nov 2023 12:10:30 -0500 Subject: [PATCH 02/38] i1 --- .github/workflows/deploy-config.json | 7 ++++++- lite/resources/start.sh | 2 +- lite/scripts/sXlite.py | 24 ++++++++++++++++++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 lite/scripts/sXlite.py diff --git a/.github/workflows/deploy-config.json b/.github/workflows/deploy-config.json index 8e8b83f70..c5cfeb8f9 100644 --- a/.github/workflows/deploy-config.json +++ b/.github/workflows/deploy-config.json @@ -48,5 +48,10 @@ "dir_name": "minio_cleanup", "image_name": "servicex_minio_cleanup", "test_required": false + }, + { + "dir_name": "lite", + "image_name": "servicex_lite", + "test_required": false } -] +] \ No newline at end of file diff --git a/lite/resources/start.sh b/lite/resources/start.sh index e016de373..1e9d863bb 100644 --- a/lite/resources/start.sh +++ b/lite/resources/start.sh @@ -2,4 +2,4 @@ PATH=.venv/bin:$PATH . 
.venv/bin/activate env -python3.11 ./minio_cleanup.py --max-size $MAX_SIZE --norm-size $NORM_SIZE --max-age $MAX_AGE \ No newline at end of file +python3.11 ./sXlite.py \ No newline at end of file diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py new file mode 100644 index 000000000..117dddeb4 --- /dev/null +++ b/lite/scripts/sXlite.py @@ -0,0 +1,24 @@ + +import kubernetes +from kubernetes import client + + +class sXlite: + def __init__(self): + kubernetes.config.load_kube_config() + + # client.CoreV1Api().create_namespaced_secret( + # namespace=pod_namespace, body=secret) + # print("Created Secret %s" % secret_name) + # secret_created = True + +# print("Delete existing secret if present") +# try: +# client.CoreV1Api().delete_namespaced_secret( +# namespace=pod_namespace, name=secret_name) +# except kubernetes.client.rest.ApiException as api_exception: +# print("No existing secret to delete") + + +if __name__ == '__main__': + sXlite() From acf88d226b0166a3230e9c09e5f452928aecc2da Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 1 Nov 2023 12:21:24 -0500 Subject: [PATCH 03/38] nofform --- .github/workflows/deploy-config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deploy-config.json b/.github/workflows/deploy-config.json index c5cfeb8f9..e436c511f 100644 --- a/.github/workflows/deploy-config.json +++ b/.github/workflows/deploy-config.json @@ -54,4 +54,4 @@ "image_name": "servicex_lite", "test_required": false } -] \ No newline at end of file +] From e75e37b3f1a9065a355aa1782ec3812d3774587e Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 1 Nov 2023 13:19:56 -0500 Subject: [PATCH 04/38] f --- lite/.gitignore | 1 + lite/poetry.lock | 407 ++++++++++++++++++++++++++++++++++++++++++ lite/pyproject.toml | 2 +- lite/requirements.txt | 4 +- 4 files changed, 411 insertions(+), 3 deletions(-) create mode 100644 lite/.gitignore create mode 100644 lite/poetry.lock diff --git a/lite/.gitignore b/lite/.gitignore new file mode 100644 index 000000000..b694934fb --- /dev/null +++ b/lite/.gitignore @@ -0,0 +1 @@ +.venv \ No newline at end of file diff --git a/lite/poetry.lock b/lite/poetry.lock new file mode 100644 index 000000000..7cfdacf51 --- /dev/null +++ b/lite/poetry.lock @@ -0,0 +1,407 @@ +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. + +[[package]] +name = "cachetools" +version = "5.3.2" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.3.2-py3-none-any.whl", hash = "sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1"}, + {file = "cachetools-5.3.2.tar.gz", hash = "sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2"}, +] + +[[package]] +name = "certifi" +version = "2023.7.22" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, + {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "google-auth" +version = "2.23.4" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-auth-2.23.4.tar.gz", hash = "sha256:79905d6b1652187def79d491d6e23d0cbb3a21d3c7ba0dbaa9c8a01906b13ff3"}, + {file = "google_auth-2.23.4-py2.py3-none-any.whl", hash = "sha256:d4bbc92fe4b8bfd2f3e8d88e5ba7085935da208ee38a134fc280e7ce682a05f2"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "idna" +version = "3.4" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "kubernetes" +version = "28.1.0" +description = "Kubernetes python client" +optional = false +python-versions = ">=3.6" +files = [ + {file = "kubernetes-28.1.0-py2.py3-none-any.whl", hash = "sha256:10f56f8160dcb73647f15fafda268e7f60cf7dbc9f8e46d52fcd46d3beb0c18d"}, + {file = "kubernetes-28.1.0.tar.gz", hash = "sha256:1468069a573430fb1cb5ad22876868f57977930f80a6749405da31cd6086a7e9"}, +] + +[package.dependencies] +certifi = ">=14.05.14" +google-auth = ">=1.0.1" +oauthlib = ">=3.2.2" +python-dateutil = ">=2.5.3" +pyyaml = ">=5.4.1" +requests = "*" +requests-oauthlib = "*" +six = ">=1.9.0" +urllib3 = ">=1.24.2,<2.0" +websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0" + +[package.extras] +adal = ["adal (>=1.0.2)"] + +[[package]] +name = "oauthlib" +version = "3.2.2" +description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +optional = false +python-versions = ">=3.6" +files = [ + {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, + {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, +] + +[package.extras] +rsa = ["cryptography (>=3.0.0)"] +signals = ["blinker (>=1.4.0)"] +signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] + +[[package]] +name = "pyasn1" +version = "0.5.0" +description = "Pure-Python implementation of ASN.1 types and 
DER/BER/CER codecs (X.208)" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "pyasn1-0.5.0-py2.py3-none-any.whl", hash = "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57"}, + {file = "pyasn1-0.5.0.tar.gz", hash = "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.3.0" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"}, + {file = "pyasn1_modules-0.3.0.tar.gz", hash = "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.6.0" + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-logstash" +version = "0.4.8" +description = "Python logging handler for Logstash." +optional = false +python-versions = "*" +files = [ + {file = "python-logstash-0.4.8.tar.gz", hash = "sha256:d04e1ce11ecc107e4a4f3b807fc57d96811e964a554081b3bbb44732f74ef5f9"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + 
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-oauthlib" +version = "1.3.1" +description = "OAuthlib authentication support for Requests." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"}, + {file = "requests_oauthlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5"}, +] + +[package.dependencies] +oauthlib = ">=3.0.0" +requests = ">=2.0.0" + +[package.extras] +rsa = ["oauthlib[signedtoken] (>=3.0.0)"] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "urllib3" +version = "1.26.18" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, + {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, +] + +[package.extras] +brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "websocket-client" +version = "1.6.4" +description = "WebSocket client for Python with low level API options" +optional = false +python-versions = ">=3.8" +files = [ + {file = "websocket-client-1.6.4.tar.gz", hash = "sha256:b3324019b3c28572086c4a319f91d1dcd44e6e11cd340232978c684a7650d0df"}, + {file = "websocket_client-1.6.4-py3-none-any.whl", hash = "sha256:084072e0a7f5f347ef2ac3d8698a5e0b4ffbfcab607628cadabc650fc9a83a24"}, +] + +[package.extras] +docs = ["Sphinx (>=6.0)", "sphinx-rtd-theme (>=1.1.0)"] +optional = ["python-socks", "wsaccel"] +test = ["websockets"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.11" +content-hash = "7f394cd602279201a29d73c1df9f06122262161bf8d6ef37b1c09589207f290b" diff --git a/lite/pyproject.toml b/lite/pyproject.toml index 669163861..72f78aecb 100644 --- a/lite/pyproject.toml +++ b/lite/pyproject.toml @@ -6,7 +6,7 @@ authors = ["Ilija Vukotic "] [tool.poetry.dependencies] python = "^3.11" -kubernetes = "^25.3.0" +kubernetes = "^28.1.0" python-logstash = "^0.4.8" [tool.poetry.dev-dependencies] diff --git a/lite/requirements.txt b/lite/requirements.txt index 9e347f602..606a5c8a6 100644 --- a/lite/requirements.txt +++ b/lite/requirements.txt @@ -1,2 +1,2 @@ -minio -servicex_storage \ No newline at end of file +kubernetes +python-logstash \ No newline at end of file From 4a2871a4f98173efdbc1f29d34a67d694f13c7d2 
Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 1 Nov 2023 15:03:32 -0500 Subject: [PATCH 05/38] f1 --- lite/.gitignore | 3 ++- lite/resources/deployment.yaml | 39 ++++++++++++++++++++++++++++++++++ lite/scripts/sXlite.py | 23 ++++++++++++++++---- 3 files changed, 60 insertions(+), 5 deletions(-) diff --git a/lite/.gitignore b/lite/.gitignore index b694934fb..3ec29ac53 100644 --- a/lite/.gitignore +++ b/lite/.gitignore @@ -1 +1,2 @@ -.venv \ No newline at end of file +.venv +kubeconfig-secret.yaml diff --git a/lite/resources/deployment.yaml b/lite/resources/deployment.yaml index e69de29bb..25ddbc36a 100644 --- a/lite/resources/deployment.yaml +++ b/lite/resources/deployment.yaml @@ -0,0 +1,39 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: servicex-lite + namespace: servicex-lite + labels: + k8s-app: servicex-lite +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: servicex-lite + template: + metadata: + labels: + k8s-app: servicex-lite + spec: + containers: + - name: servicex-lite + image: sslhep/servicex_lite:sXlite + imagePullPolicy: Always + command: ["sleep"] + args: ["infinity"] + volumeMounts: + - name: kubeconfig + mountPath: /home/sxlite/.kube/ + # - name: grid-secret + # mountPath: /etc/grid-certs-ro/ + # - name: rucio-cfg + # mountPath: /opt/rucio/etc/ + volumes: + - name: kubeconfig + secret: + secretName: kubeconfig-secret + # - name: grid-certs-rw-copy + # emptyDir: {} + # - name: rucio-cfg + # configMap: + # name: rucio-config diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 117dddeb4..b14dceae3 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -1,12 +1,25 @@ - import kubernetes -from kubernetes import client +from kubernetes import client, watch -class sXlite: - def __init__(self): +class sXorigin: + def __init__(self) -> None: kubernetes.config.load_kube_config() + def watch(self): + v1 = client.CoreV1Api() + count = 10 + w = watch.Watch() + for event in w.stream(v1.list_namespace, _request_timeout=60): + print(f"Event: {event['type']} {event['object'].metadata.name}") + count -= 1 + if not count: + w.stop() + + +class sXlite: + def __init__(self) -> None: + pass # client.CoreV1Api().create_namespaced_secret( # namespace=pod_namespace, body=secret) # print("Created Secret %s" % secret_name) @@ -21,4 +34,6 @@ def __init__(self): if __name__ == '__main__': + sXorigin() sXlite() + sXorigin.watch() From 76ff3cf986c555a5120a3e1fdd51d4d46c30834d Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Mon, 6 Nov 2023 12:07:01 -0600 Subject: [PATCH 06/38] basic working version --- .../templates/app/rmq_ingress_inst.yaml | 21 ++ lite/.dockerignore | 2 + lite/.github/workflows/ci.yaml | 2 +- lite/Dockerfile | 16 +- lite/README.md | 18 +- lite/{resources => kube}/deployment.yaml | 18 +- lite/kube/lb.yaml | 11 + lite/scripts/sXlite.py | 235 +++++++++++++++--- 8 files changed, 272 insertions(+), 51 deletions(-) create mode 100644 helm/servicex/templates/app/rmq_ingress_inst.yaml create mode 100644 lite/.dockerignore rename lite/{resources => kube}/deployment.yaml (64%) create mode 100644 lite/kube/lb.yaml diff --git a/helm/servicex/templates/app/rmq_ingress_inst.yaml b/helm/servicex/templates/app/rmq_ingress_inst.yaml new file mode 100644 index 000000000..fe21dc804 --- /dev/null +++ b/helm/servicex/templates/app/rmq_ingress_inst.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: nginx + labels: + app: servicex-rmq-servicex + name: servicex-rmq-servicex 
+spec: + rules: + # - host: servicex.rmq.ssl-hep.org + - host: servicex.rmq.af.uchicago.edu + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: servicex-rabbitmq + port: + number: 5672 diff --git a/lite/.dockerignore b/lite/.dockerignore new file mode 100644 index 000000000..911db4120 --- /dev/null +++ b/lite/.dockerignore @@ -0,0 +1,2 @@ +kube +.github diff --git a/lite/.github/workflows/ci.yaml b/lite/.github/workflows/ci.yaml index b85c2441a..2b5a2c403 100644 --- a/lite/.github/workflows/ci.yaml +++ b/lite/.github/workflows/ci.yaml @@ -31,7 +31,7 @@ jobs: - name: Build Docker Image uses: elgohr/Publish-Docker-Github-Action@v5 with: - name: sslhep/servicex_lite:${{ steps.extract_tag_name.outputs.imagetag }} + name: sslhep/servicex-lite:${{ steps.extract_tag_name.outputs.imagetag }} username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} # tag: "${GITHUB_REF##*/}" diff --git a/lite/Dockerfile b/lite/Dockerfile index 1ac6a2e1a..8b246fe74 100644 --- a/lite/Dockerfile +++ b/lite/Dockerfile @@ -1,12 +1,12 @@ FROM python:3.11 AS builder -RUN useradd -ms /bin/bash cleanup +RUN useradd -ms /bin/bash sxlite -COPY pyproject.toml poetry.lock /home/cleanup/ -WORKDIR /home/cleanup +COPY pyproject.toml poetry.lock /home/sxlite/ +WORKDIR /home/sxlite FROM builder as poetry -ENV POETRY_HOME=/home/cleanup +ENV POETRY_HOME=/home/sxlite ENV POETRY_VIRTUALENVS_IN_PROJECT=true ENV PATH="$POETRY_HOME/bin:$PATH" RUN python -c 'from urllib.request import urlopen; print(urlopen("https://install.python-poetry.org").read().decode())' | python - @@ -15,13 +15,13 @@ RUN poetry install --no-interaction --no-ansi -vvv FROM builder AS runtime -COPY --from=poetry /home/cleanup /home/cleanup -WORKDIR /home/cleanup -RUN mkdir ./cleanup +COPY --from=poetry /home/sxlite /home/sxlite +WORKDIR /home/sxlite +RUN mkdir ./sxlite COPY scripts/*.py resources/start.sh ./ RUN chmod +x start.sh -USER cleanup +USER sxlite ENTRYPOINT ["./start.sh"] diff --git a/lite/README.md b/lite/README.md index 3ee91ef65..59770bad0 100644 --- a/lite/README.md +++ b/lite/README.md @@ -1,2 +1,16 @@ -# storage_cleanup -Microservice to cleanup storage used by ServiceX +# ServiceX Lite + +This service runs a simple code that listens for the transformation request created in the "master" servicex and deploys transforms that will do the same job as the ones in the master. + +Prerequisits: + +* Kube configs of both slave and master k8s clusters. +* Deployment of a loadbalancer in front of the master servicex RMQ +* Master servicex configmap value ADVERTISED_HOSTNAME is set to the externally accessible servicex URL. +* Master servicex ingress routes internal paths the same way as external. + +## TODO + +* cleanup of canceled/finished transforms. +* limited scope kube config +* if needed - an automatic patching of master's configmap and ingress routes. 
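The mirroring the README describes comes down to talking to two clusters through the Kubernetes dynamic client, one kubeconfig context per cluster, which is how the `sXlite.py` script later in this patch is structured. A minimal sketch of that pattern follows; the context names are placeholders, not real clusters.

```python
from kubernetes import config, dynamic
from kubernetes.client import api_client


def deployment_api(context):
    # One dynamic client per kubeconfig context; the client is built right after
    # load_kube_config() so it picks up that context's configuration.
    client = dynamic.DynamicClient(
        api_client.ApiClient(configuration=config.load_kube_config(context=context))
    )
    return client.resources.get(api_version="apps/v1", kind="Deployment")


if __name__ == "__main__":
    origin = deployment_api("origin-context")  # placeholder: master cluster context
    lite = deployment_api("lite-context")      # placeholder: remote "lite" cluster context
    # List transformer deployments on the master; the equivalent handle on the lite
    # side is what the mirroring code uses to create copies of them.
    for dep in origin.get(namespace="servicex").items:
        if dep.metadata.name.startswith("transformer"):
            print(dep.metadata.name)
```

The same kind of resource handle, opened against the lite cluster, is what the script uses to create the copied Deployments, ConfigMaps, and Secrets.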
diff --git a/lite/resources/deployment.yaml b/lite/kube/deployment.yaml similarity index 64% rename from lite/resources/deployment.yaml rename to lite/kube/deployment.yaml index 25ddbc36a..573c64f77 100644 --- a/lite/resources/deployment.yaml +++ b/lite/kube/deployment.yaml @@ -17,23 +17,21 @@ spec: spec: containers: - name: servicex-lite - image: sslhep/servicex_lite:sXlite + image: sslhep/servicex-lite:latest imagePullPolicy: Always command: ["sleep"] args: ["infinity"] volumeMounts: - name: kubeconfig mountPath: /home/sxlite/.kube/ - # - name: grid-secret - # mountPath: /etc/grid-certs-ro/ - # - name: rucio-cfg - # mountPath: /opt/rucio/etc/ + resources: + requests: + memory: "256Mi" + cpu: "250m" + limits: + memory: "512Mi" + cpu: "500m" volumes: - name: kubeconfig secret: secretName: kubeconfig-secret - # - name: grid-certs-rw-copy - # emptyDir: {} - # - name: rucio-cfg - # configMap: - # name: rucio-config diff --git a/lite/kube/lb.yaml b/lite/kube/lb.yaml new file mode 100644 index 000000000..b4683393c --- /dev/null +++ b/lite/kube/lb.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: rmq-lb-service +spec: + selector: + app.kubernetes.io/name: rabbitmq + ports: + - port: 5672 + targetPort: 5672 + type: LoadBalancer diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index b14dceae3..d29b9cb47 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -1,39 +1,214 @@ -import kubernetes -from kubernetes import client, watch +import time +from kubernetes import config, dynamic +from kubernetes.client import api_client +from kubernetes.dynamic import exceptions +requests = {} -class sXorigin: - def __init__(self) -> None: - kubernetes.config.load_kube_config() - def watch(self): - v1 = client.CoreV1Api() - count = 10 - w = watch.Watch() - for event in w.stream(v1.list_namespace, _request_timeout=60): - print(f"Event: {event['type']} {event['object'].metadata.name}") - count -= 1 - if not count: - w.stop() +class cluster: + def __init__(self, context) -> None: + client = dynamic.DynamicClient( + api_client.ApiClient(configuration=config.load_kube_config(context=context)) + ) + # self.service_api = client.resources.get(api_version="v1", kind="Service") + self.node_api = client.resources.get(api_version="v1", kind="Node") + self.secret_api = client.resources.get(api_version="v1", kind="Secret") + self.deployment_api = client.resources.get(api_version="apps/v1", kind="Deployment") + self.cm_api = client.resources.get(api_version="v1", kind="ConfigMap") + def getNodes(self): + for item in self.node_api.get().items: + node = self.node_api.get(name=item.metadata.name) + print(f'{node.metadata.name}') -class sXlite: - def __init__(self) -> None: - pass - # client.CoreV1Api().create_namespaced_secret( - # namespace=pod_namespace, body=secret) - # print("Created Secret %s" % secret_name) - # secret_created = True + def clean_metadata(self, obj): + obj['metadata'].pop('ownerReferences', None) + obj['metadata'].pop('managedFields', None) + obj['metadata'].pop('creationTimestamp', None) + obj['metadata'].pop('namespace', None) + obj['metadata'].pop('resourceVersion', None) + obj['metadata'].pop('uid', None) + obj.pop('status', None) + return obj + + +class sXorigin(cluster): + def __init__(self, context='af-admin@af', namespace='servicex') -> None: + super().__init__(context) + self.ns = namespace + + def read_secret(self, name): + sec = self.secret_api.get(name=name, namespace=self.ns) + self.clean_metadata(sec) + return sec + + def read_configmap(self, name): 
+ cm = self.cm_api.get(name=name, namespace=self.ns) + self.clean_metadata(cm) + return cm + + def read_deployment(self): + for dep in self.deployment_api.get(namespace=self.ns).items: + if dep.metadata.name.startswith('transformer'): + req_id = dep.metadata.name[12:] + if req_id not in requests: + print(f'found deployment for request:{req_id}') + requests[req_id] = 'active' + return self.deployment_body(dep) + + def deployment_body(self, o): + req_id = o.metadata.name[12:] + c1 = o.spec.template.spec.containers[0] + c2 = o.spec.template.spec.containers[1] + v = o.spec.template.spec.volumes + dep = { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": {"name": o.metadata.name}, + "spec": { + "replicas": 10, + "selector": {"matchLabels": {'app': o.spec.selector.matchLabels.app}}, + "template": { + "metadata": {"labels": {"app": o.spec.template.metadata.labels.app}}, + "spec": { + "containers": [ + { + "name": c1.name, + "image": c1.image, + "command": ["bash", "-c"], + "env": [], + "volumeMounts": [], + "resources": {"limits": {"cpu": "1"}}, + "args": [] + }, + { + "name": c2.name, + "image": c2.image, + "command": ["bash", "-c"], + "env": [], + "volumeMounts": [], + "resources": {"limits": {"cpu": "1"}}, + "args": ['-c'], + } + ], + "volumes": [] + } + } + } + } + for e in c1.env: + ta = {'name': e.name, 'value': e.value} + dep['spec']['template']['spec']['containers'][0]['env'].append(ta) + for e in c2.env: + ta = {'name': e.name, 'value': e.value} + dep['spec']['template']['spec']['containers'][1]['env'].append(ta) + + for e in c1.volumeMounts: + ta = {'name': e.name, 'mountPath': e.mountPath} + dep['spec']['template']['spec']['containers'][0]['volumeMounts'].append(ta) + for e in c2.volumeMounts: + ta = {'name': e.name, 'mountPath': e.mountPath} + dep['spec']['template']['spec']['containers'][1]['volumeMounts'].append(ta) + + for e in v: + vo = {'name': e.name} + if e.emptyDir: + vo['emptyDir'] = {} + if e.secret: + s = e.secret + vo['secret'] = {'secretName': s.secretName, 'defaultMode': s.defaultMode} + if e.configMap: + cm = e.configMap + vo['configMap'] = {'name': cm.name, 'defaultMode': cm.defaultMode} + dep['spec']['template']['spec']['volumes'].append(vo) + + sidecar_args = [ + f'PYTHONPATH=/servicex/transformer_sidecar:$PYTHONPATH python /servicex/transformer_sidecar/transformer.py --request-id {req_id} --rabbit-uri amqp://user:993ecf4239a783c521e6077d91@192.170.241.253:5672/%2F?heartbeat=9000 --result-destination object-store --result-format root-file' + ] + transformer_args = [ + f'until [ -f /servicex/output/scripts/proxy-exporter.sh ];do sleep 5;done && /servicex/output/scripts/proxy-exporter.sh & sleep 5 && cp /generated/transformer_capabilities.json /servicex/output && PYTHONPATH=/generated:$PYTHONPATH bash /servicex/output/scripts/watch.sh python /generated/transform_single_file.py /servicex/output/{req_id}' + ] + dep['spec']['template']['spec']['containers'][0]['args'] = sidecar_args + dep['spec']['template']['spec']['containers'][1]['args'] = transformer_args + + print('=========================') + print(dep) + return dep + + +class sXlite(cluster): + def __init__(self, context='admin@river', namespace='servicex-lite') -> None: + super().__init__(context) + self.ns = namespace -# print("Delete existing secret if present") -# try: -# client.CoreV1Api().delete_namespaced_secret( -# namespace=pod_namespace, name=secret_name) -# except kubernetes.client.rest.ApiException as api_exception: -# print("No existing secret to delete") + def create_secret(self, 
secret): + try: + secret = self.secret_api.create(body=secret, namespace=self.ns) + print(f'created secret: {secret.metadata.name}') + except exceptions.ConflictError: + print(f'conflict creating secret: {secret.metadata.name}') + + def delete_secret(self, name): + try: + self.secret_api.delete(body={}, name=name, namespace=self.ns) + print(f'deleted secret: {name}') + except exceptions.NotFoundError as e: + print('could not delete resource:', e.summary()) + + def create_configmap(self, cm): + try: + cm = self.cm_api.create(body=cm, namespace=self.ns) + print(f'created configmap: {cm.metadata.name}') + except exceptions.ConflictError: + print(f'conflict creating configmap: {cm.metadata.name}') + + def delete_configmap(self, name): + self.cm_api.delete(body={}, name=name, namespace=self.ns) + + def create_deployment(self, dep): + try: + dep = self.deployment_api.create(body=dep, namespace=self.ns) + print(f'created deployment: {dep.metadata.name}') + except exceptions.ConflictError as e: + print('conflict creating deployment:', e.summary()) + + def delete_deployment(self, name): + pass if __name__ == '__main__': - sXorigin() - sXlite() - sXorigin.watch() + + def cleanup(): + sxl.delete_secret('grid-certs-secret') + sxl.delete_secret('servicex-secrets') + sxl.delete_secret('servicex-x509-proxy') + time.sleep(10) + + def start(): + sec = sxo.read_secret('grid-certs-secret') + sxl.create_secret(sec) + + sec = sxo.read_secret('servicex-secrets') + sxl.create_secret(sec) + + sec = sxo.read_secret('servicex-x509-proxy') + sxl.create_secret(sec) + + sxo = sXorigin() + sxl = sXlite() + + # cleanup() + start() + + # watch for new CMs and requests + while True: + d = sxo.read_deployment() + if d: + req_id = d['metadata']['name'][12:] + print(f'req_id: {req_id}') + sxl.create_deployment(d) + cm = sxo.read_configmap(f'{req_id}-generated-source') + sxl.create_configmap(cm) + else: + time.sleep(5) From dc5dc2855044547edc0d3e6b92cf00802eeebc7b Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Mon, 6 Nov 2023 12:16:30 -0600 Subject: [PATCH 07/38] remove hardcoded user/pass --- lite/README.md | 1 + lite/scripts/sXlite.py | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lite/README.md b/lite/README.md index 59770bad0..dfe325933 100644 --- a/lite/README.md +++ b/lite/README.md @@ -12,5 +12,6 @@ Prerequisits: ## TODO * cleanup of canceled/finished transforms. +* fix sidecar args (now hardcoded). * limited scope kube config * if needed - an automatic patching of master's configmap and ingress routes. 
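The `sXlite.py` hunk that follows replaces the hard-coded RabbitMQ credentials in the sidecar arguments with `{user}` and `{password}` placeholders. A small sketch of one way to fill those values, assuming the copied `servicex-secrets` secret carries a `rabbitmq-password` key (the key the next patch in this series reads), is to decode them straight from the secret object:

```python
import base64


def rabbit_uri(secret, user="user", host="rmq.example.org"):
    # Kubernetes stores secret values base64-encoded; decode before use.
    password = base64.b64decode(secret.data["rabbitmq-password"]).decode()
    # Same URI shape the sidecar arguments use, with the credentials filled in,
    # e.g. rabbit_uri(sxo.read_secret("servicex-secrets"), host="<rmq load balancer>")
    return f"amqp://{user}:{password}@{host}:5672/%2F?heartbeat=9000"
```

The host stays a placeholder here; in this setup it is the address of the load balancer placed in front of the master RabbitMQ.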
diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index d29b9cb47..cebdfc1ef 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -124,7 +124,7 @@ def deployment_body(self, o): dep['spec']['template']['spec']['volumes'].append(vo) sidecar_args = [ - f'PYTHONPATH=/servicex/transformer_sidecar:$PYTHONPATH python /servicex/transformer_sidecar/transformer.py --request-id {req_id} --rabbit-uri amqp://user:993ecf4239a783c521e6077d91@192.170.241.253:5672/%2F?heartbeat=9000 --result-destination object-store --result-format root-file' + f'PYTHONPATH=/servicex/transformer_sidecar:$PYTHONPATH python /servicex/transformer_sidecar/transformer.py --request-id {req_id} --rabbit-uri amqp://{user}:{password}@192.170.241.253:5672/%2F?heartbeat=9000 --result-destination object-store --result-format root-file' ] transformer_args = [ f'until [ -f /servicex/output/scripts/proxy-exporter.sh ];do sleep 5;done && /servicex/output/scripts/proxy-exporter.sh & sleep 5 && cp /generated/transformer_capabilities.json /servicex/output && PYTHONPATH=/generated:$PYTHONPATH bash /servicex/output/scripts/watch.sh python /generated/transform_single_file.py /servicex/output/{req_id}' @@ -133,9 +133,13 @@ def deployment_body(self, o): dep['spec']['template']['spec']['containers'][1]['args'] = transformer_args print('=========================') - print(dep) + # print(dep) return dep + def patch_master(self): + print('patch ingress if needed.') + print('patch configmap so it ADVERTIZES external URL.') + class sXlite(cluster): def __init__(self, context='admin@river', namespace='servicex-lite') -> None: @@ -198,6 +202,7 @@ def start(): sxo = sXorigin() sxl = sXlite() + sxo.patch_master() # cleanup() start() From 879ea19b69bca3bcb259014153945355363ac9a8 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Mon, 6 Nov 2023 14:24:23 -0600 Subject: [PATCH 08/38] deployment --- lite/README.md | 3 +- lite/kube/deployment.yaml | 11 ++++-- lite/scripts/sXlite.py | 75 ++++++++++++++++++++++++++++----------- 3 files changed, 65 insertions(+), 24 deletions(-) diff --git a/lite/README.md b/lite/README.md index dfe325933..8d3b07322 100644 --- a/lite/README.md +++ b/lite/README.md @@ -11,7 +11,6 @@ Prerequisits: ## TODO -* cleanup of canceled/finished transforms. -* fix sidecar args (now hardcoded). * limited scope kube config +* add options to configure number of initial pods, resource request, etc. * if needed - an automatic patching of master's configmap and ingress routes. 
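The knobs this patch introduces on the servicex-lite Deployment (`RMQ_USER`, `RMQ_HOST`, `INITIAL_PODS`) reach the script as plain environment variables, which the `sXlite.py` hunk below reads with `os.getenv`. A trimmed sketch of that pattern, with defaults mirroring the values used in this patch:

```python
import os

# Defaults mirror this patch; override them through the Deployment's env section.
rmq_user = os.getenv("RMQ_USER", "user")
rmq_host = os.getenv("RMQ_HOST", "192.170.241.253")
initial_pods = int(os.getenv("INITIAL_PODS", "15"))

# INITIAL_PODS becomes the replica count of every mirrored transformer Deployment,
# i.e. the generated body starts from {"spec": {"replicas": initial_pods, ...}}.
```

Resource requests and the other options mentioned in the TODO could be exposed the same way, though that is not implemented in this series.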
diff --git a/lite/kube/deployment.yaml b/lite/kube/deployment.yaml index 573c64f77..4cd82b675 100644 --- a/lite/kube/deployment.yaml +++ b/lite/kube/deployment.yaml @@ -19,8 +19,15 @@ spec: - name: servicex-lite image: sslhep/servicex-lite:latest imagePullPolicy: Always - command: ["sleep"] - args: ["infinity"] + # command: ["sleep"] + # args: ["infinity"] + env: + - name: RMQ_USER + value: "user" + - name: RMQ_HOST + value: "192.170.241.253" + - name: INITIAL_PODS + value: "15" volumeMounts: - name: kubeconfig mountPath: /home/sxlite/.kube/ diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index cebdfc1ef..dc3a9baba 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -1,9 +1,16 @@ +import os import time +import base64 from kubernetes import config, dynamic from kubernetes.client import api_client from kubernetes.dynamic import exceptions -requests = {} +requests = {'active': [], 'new': [], 'unknown': []} + +rmq_pass = '' +rmq_user = os.getenv("RMQ_USER", 'user') +rmq_host = os.getenv("RMQ_HOST", '192.170.241.253') +initial_pods = int(os.getenv("INITIAL_PODS", '15')) class cluster: @@ -48,16 +55,23 @@ def read_configmap(self, name): self.clean_metadata(cm) return cm - def read_deployment(self): + def update_requests(self): + # reset all known to unknown + requests['unknown'] = requests['active'] + requests['active'] = [] + requests['new'] = [] + for dep in self.deployment_api.get(namespace=self.ns).items: if dep.metadata.name.startswith('transformer'): req_id = dep.metadata.name[12:] - if req_id not in requests: - print(f'found deployment for request:{req_id}') - requests[req_id] = 'active' - return self.deployment_body(dep) - - def deployment_body(self, o): + if req_id in requests['unknown']: + requests['active'].append(req_id) + requests['unknown'].remove(req_id) + else: + requests['new'].append(req_id) + + def get_deployment(self, req_id): + o = self.deployment_api.get(namespace=self.ns, name=f'transformer-{req_id}') req_id = o.metadata.name[12:] c1 = o.spec.template.spec.containers[0] c2 = o.spec.template.spec.containers[1] @@ -67,7 +81,7 @@ def deployment_body(self, o): "kind": "Deployment", "metadata": {"name": o.metadata.name}, "spec": { - "replicas": 10, + "replicas": initial_pods, "selector": {"matchLabels": {'app': o.spec.selector.matchLabels.app}}, "template": { "metadata": {"labels": {"app": o.spec.template.metadata.labels.app}}, @@ -124,7 +138,7 @@ def deployment_body(self, o): dep['spec']['template']['spec']['volumes'].append(vo) sidecar_args = [ - f'PYTHONPATH=/servicex/transformer_sidecar:$PYTHONPATH python /servicex/transformer_sidecar/transformer.py --request-id {req_id} --rabbit-uri amqp://{user}:{password}@192.170.241.253:5672/%2F?heartbeat=9000 --result-destination object-store --result-format root-file' + f'PYTHONPATH=/servicex/transformer_sidecar:$PYTHONPATH python /servicex/transformer_sidecar/transformer.py --request-id {req_id} --rabbit-uri amqp://{rmq_user}:{rmq_pass}@{rmq_host}:5672/%2F?heartbeat=9000 --result-destination object-store --result-format root-file' ] transformer_args = [ f'until [ -f /servicex/output/scripts/proxy-exporter.sh ];do sleep 5;done && /servicex/output/scripts/proxy-exporter.sh & sleep 5 && cp /generated/transformer_capabilities.json /servicex/output && PYTHONPATH=/generated:$PYTHONPATH bash /servicex/output/scripts/watch.sh python /generated/transform_single_file.py /servicex/output/{req_id}' @@ -168,7 +182,11 @@ def create_configmap(self, cm): print(f'conflict creating configmap: {cm.metadata.name}') def 
delete_configmap(self, name): - self.cm_api.delete(body={}, name=name, namespace=self.ns) + try: + self.cm_api.delete(body={}, name=name, namespace=self.ns) + print(f'deleted configmap: {name}') + except Exception as e: + print(f'could not delete configmap:{name}', e) def create_deployment(self, dep): try: @@ -178,7 +196,11 @@ def create_deployment(self, dep): print('conflict creating deployment:', e.summary()) def delete_deployment(self, name): - pass + try: + self.deployment_api.delete(body={}, name=name, namespace=self.ns) + print(f'deleted deployment: {name}') + except Exception as e: + print(f'could not delete deployment:{name}', e) if __name__ == '__main__': @@ -187,7 +209,6 @@ def cleanup(): sxl.delete_secret('grid-certs-secret') sxl.delete_secret('servicex-secrets') sxl.delete_secret('servicex-x509-proxy') - time.sleep(10) def start(): sec = sxo.read_secret('grid-certs-secret') @@ -195,6 +216,8 @@ def start(): sec = sxo.read_secret('servicex-secrets') sxl.create_secret(sec) + global rmq_pass + rmq_pass = base64.b64decode(sec.data['rabbitmq-password']) sec = sxo.read_secret('servicex-x509-proxy') sxl.create_secret(sec) @@ -206,14 +229,26 @@ def start(): # cleanup() start() - # watch for new CMs and requests + count = 0 while True: - d = sxo.read_deployment() - if d: - req_id = d['metadata']['name'][12:] - print(f'req_id: {req_id}') + sxo.update_requests() + for req_id in requests['new']: + d = sxo.get_deployment(req_id) sxl.create_deployment(d) cm = sxo.read_configmap(f'{req_id}-generated-source') sxl.create_configmap(cm) - else: - time.sleep(5) + requests['active'].append(req_id) + + for req_id in requests['unknown']: + sxl.delete_configmap(f'{req_id}-generated-source') + sxl.delete_deployment(f'transformer-{req_id}') + + for req_id in requests['active']: + print(f'req_id: {req_id} still active.') + + count += 1 + if not count % 720 and len(requests['active']) == 0: # replace secrets + cleanup() + start() + + time.sleep(5) From 05a9c9ed6cb6edc5b247375461f9c8ece49f4e20 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Mon, 6 Nov 2023 16:37:04 -0600 Subject: [PATCH 09/38] f --- lite/Dockerfile | 2 +- ..._storage_manager.cpython-310-pytest-7.4.0.pyc | Bin 0 -> 3344 bytes 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 minio_cleanup/tests/__pycache__/test_minio_storage_manager.cpython-310-pytest-7.4.0.pyc diff --git a/lite/Dockerfile b/lite/Dockerfile index 8b246fe74..46fa0f365 100644 --- a/lite/Dockerfile +++ b/lite/Dockerfile @@ -24,4 +24,4 @@ RUN chmod +x start.sh USER sxlite -ENTRYPOINT ["./start.sh"] +ENTRYPOINT ["/home/sxlite/start.sh"] diff --git a/minio_cleanup/tests/__pycache__/test_minio_storage_manager.cpython-310-pytest-7.4.0.pyc b/minio_cleanup/tests/__pycache__/test_minio_storage_manager.cpython-310-pytest-7.4.0.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6636e7873d9e3a5b077d54aacc3c381c93d73cdf GIT binary patch literal 3344 zcmb7GOLN;)6xPd$8?q!3iO5aKk;PjHRYj7eP;+u}X3-s8SPaL2x6(AjyptC;wP}R~ zr_l2&IwhJX8p#oTL)lRro7%*ndP65BF-RUByscfG6m}J`6-R8P5t}_?E1%fN#GVR$ zCxyKN`d7!D8nKOzJ3V3>9(QI>C9`{qQ+^=Ck7)R+QSBcl*+GgBlTn~e&ol>qR)Q=-_sIVM#U^@1n zMkpY>g-`@IR2?H0b1(KASNe^mBZ6+f;jF)4>k`a;(&1{~?^?fc{ZQe?THD{E>0-Ze z#fP3FmkQDyc#>NHRFKJ0s7mA=nJ7ekF73#U){}uELeBMMoftv}qO3@@JhvWp9?>Md z>m_u%0AC)&^@oVQ0%P|t+yMOQyYM7kfK*ClA}>h$$^lH)m3O3u>S_aRZlKj&BQ+cO zhDBg$X`xXds$i`Ks1Y6bY3!=c6yZ|@?&6&XjoMZBOGG}9pJ`ob-|lK%`CC$@7*MgnYTfC z1*Yk>-O!6b#T@H1!6Fe4Ch=051|j9f%F+sSqD~pM%gwgS#qHt+FM)B>&%bMX%~}pZ zh&I=`6~_LS>q8(VtOonS%@p<HH?z12F;-1t=^0Mc1WW% 
zSxv*%>Poik$CnpZ1P79=Dvk**IC5qf`<8mE!{>wU6|lImKg>Oo}mVq(zYU&4W5PGjc~&I1HLLJC-}+3X$ETtK)8&|`0Q{249S zyJ)*QB{fG@kw7jQ4@>GE;+92g3 zFwfQmvZq{-8z4OI}(aLqxVyr3zLfPIA2qp1a6g+{SaA{B8@^%AvBrpXMMJy6Nq zf+P}lxvP;GB&-;HVt|IgzhaZkblu?hkfXf}%g ze_|4wM;N4awt(Uk0y2foAe=>b0|86ISPUH9M_m;l2;uA*{yHqzDh!hY`v8FJ?bMIr zr&*9-e6Sk`m@WTHSd@E7upb&CDjb{9Fn)4UHY!8vUaQ~L-<0=+mKd4d#tIawEoopsLWyv zLj1k7!Q&(fD_RkfG~CChrsJ_iEY}Qfx-J%C*X5Rr|8ngnwu`QdbtR=kGXGapmQJvAJEPx-z{6NKxzRrTqB5Qdpz50}B7>#AmVFsMGMvhIT}3kjSd$t!!U!Be@fFJ|~%S fQ7$T`Vk??DEeJ4#K!71h5$zXn=+ovmMezLx%z<9Y literal 0 HcmV?d00001 From aab1836aac5163292315f3b3ce26d10910ed902a Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 8 Nov 2023 14:16:27 -0600 Subject: [PATCH 10/38] Add kubeconfig and test files to .gitignore, and set PYTHONUNBUFFERED to true in deployment.yaml --- lite/.gitignore | 3 ++- lite/kube/deployment.yaml | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lite/.gitignore b/lite/.gitignore index 3ec29ac53..265cff59c 100644 --- a/lite/.gitignore +++ b/lite/.gitignore @@ -1,2 +1,3 @@ .venv -kubeconfig-secret.yaml +kubeconfig-secret* +test* \ No newline at end of file diff --git a/lite/kube/deployment.yaml b/lite/kube/deployment.yaml index 4cd82b675..dbd406c7c 100644 --- a/lite/kube/deployment.yaml +++ b/lite/kube/deployment.yaml @@ -28,6 +28,8 @@ spec: value: "192.170.241.253" - name: INITIAL_PODS value: "15" + - name: PYTHONUNBUFFERED + value: "true" volumeMounts: - name: kubeconfig mountPath: /home/sxlite/.kube/ From 089122fefe4cd6cfd7c47fa0a1f0b24582e5238c Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 8 Nov 2023 14:16:52 -0600 Subject: [PATCH 11/38] Add xcache and HPA options to kube config --- lite/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lite/README.md b/lite/README.md index 8d3b07322..9aadfc4a5 100644 --- a/lite/README.md +++ b/lite/README.md @@ -11,6 +11,6 @@ Prerequisits: ## TODO -* limited scope kube config -* add options to configure number of initial pods, resource request, etc. +* add options to configure xcache, resource request, etc. +* add creation of a HPA * if needed - an automatic patching of master's configmap and ingress routes. 
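The controller loop shown earlier in this series tracks transformer requests in three buckets (`new`, `active`, and `unknown`) and re-derives them on every pass over the origin cluster's deployments. The sketch below illustrates only that bookkeeping; `list_origin_request_ids`, `create_resources`, and `delete_resources` are hypothetical stand-ins for the Kubernetes calls made in `sXlite.py`, not code from the patches.

```python
# Minimal sketch of the new/active/unknown reconciliation used by sXlite.
# The three callables are hypothetical stand-ins for real Kubernetes API calls.
requests = {'active': [], 'new': [], 'unknown': []}


def reconcile(list_origin_request_ids, create_resources, delete_resources):
    # Everything previously active is treated as unknown until the origin confirms it.
    requests['unknown'] = requests['active']
    requests['active'] = []
    requests['new'] = []

    for req_id in list_origin_request_ids():
        if req_id in requests['unknown']:
            # Still present at the origin: keep mirroring it.
            requests['active'].append(req_id)
            requests['unknown'].remove(req_id)
        else:
            # Seen for the first time: mirror it into the lite cluster.
            requests['new'].append(req_id)

    for req_id in requests['new']:
        create_resources(req_id)
        requests['active'].append(req_id)

    for req_id in requests['unknown']:
        # No longer present at the origin: tear down the mirrored resources.
        delete_resources(req_id)
```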
From c16c930e97151ce37095cb62394b3068b48ff6eb Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Tue, 13 Feb 2024 18:06:25 -0600 Subject: [PATCH 12/38] garbage --- ..._storage_manager.cpython-310-pytest-7.4.0.pyc | Bin 3344 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 minio_cleanup/tests/__pycache__/test_minio_storage_manager.cpython-310-pytest-7.4.0.pyc diff --git a/minio_cleanup/tests/__pycache__/test_minio_storage_manager.cpython-310-pytest-7.4.0.pyc b/minio_cleanup/tests/__pycache__/test_minio_storage_manager.cpython-310-pytest-7.4.0.pyc deleted file mode 100644 index 6636e7873d9e3a5b077d54aacc3c381c93d73cdf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3344 zcmb7GOLN;)6xPd$8?q!3iO5aKk;PjHRYj7eP;+u}X3-s8SPaL2x6(AjyptC;wP}R~ zr_l2&IwhJX8p#oTL)lRro7%*ndP65BF-RUByscfG6m}J`6-R8P5t}_?E1%fN#GVR$ zCxyKN`d7!D8nKOzJ3V3>9(QI>C9`{qQ+^=Ck7)R+QSBcl*+GgBlTn~e&ol>qR)Q=-_sIVM#U^@1n zMkpY>g-`@IR2?H0b1(KASNe^mBZ6+f;jF)4>k`a;(&1{~?^?fc{ZQe?THD{E>0-Ze z#fP3FmkQDyc#>NHRFKJ0s7mA=nJ7ekF73#U){}uELeBMMoftv}qO3@@JhvWp9?>Md z>m_u%0AC)&^@oVQ0%P|t+yMOQyYM7kfK*ClA}>h$$^lH)m3O3u>S_aRZlKj&BQ+cO zhDBg$X`xXds$i`Ks1Y6bY3!=c6yZ|@?&6&XjoMZBOGG}9pJ`ob-|lK%`CC$@7*MgnYTfC z1*Yk>-O!6b#T@H1!6Fe4Ch=051|j9f%F+sSqD~pM%gwgS#qHt+FM)B>&%bMX%~}pZ zh&I=`6~_LS>q8(VtOonS%@p<HH?z12F;-1t=^0Mc1WW% zSxv*%>Poik$CnpZ1P79=Dvk**IC5qf`<8mE!{>wU6|lImKg>Oo}mVq(zYU&4W5PGjc~&I1HLLJC-}+3X$ETtK)8&|`0Q{249S zyJ)*QB{fG@kw7jQ4@>GE;+92g3 zFwfQmvZq{-8z4OI}(aLqxVyr3zLfPIA2qp1a6g+{SaA{B8@^%AvBrpXMMJy6Nq zf+P}lxvP;GB&-;HVt|IgzhaZkblu?hkfXf}%g ze_|4wM;N4awt(Uk0y2foAe=>b0|86ISPUH9M_m;l2;uA*{yHqzDh!hY`v8FJ?bMIr zr&*9-e6Sk`m@WTHSd@E7upb&CDjb{9Fn)4UHY!8vUaQ~L-<0=+mKd4d#tIawEoopsLWyv zLj1k7!Q&(fD_RkfG~CChrsJ_iEY}Qfx-J%C*X5Rr|8ngnwu`QdbtR=kGXGapmQJvAJEPx-z{6NKxzRrTqB5Qdpz50}B7>#AmVFsMGMvhIT}3kjSd$t!!U!Be@fFJ|~%S fQ7$T`Vk??DEeJ4#K!71h5$zXn=+ovmMezLx%z<9Y From a5471fb4f6a170956c1e6bf46b40dad9047c9025 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Tue, 13 Feb 2024 18:10:23 -0600 Subject: [PATCH 13/38] making lint happy --- helm/servicex/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helm/servicex/values.yaml b/helm/servicex/values.yaml index 1ef9d0d51..87539272d 100644 --- a/helm/servicex/values.yaml +++ b/helm/servicex/values.yaml @@ -147,7 +147,7 @@ transformer: scienceContainerPullPolicy: Always language: python - exec: # replace me + exec: # replace me outputDir: /servicex/output persistence: From 70d6745c112405711ea0d3dd95c7e02517575cb2 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Fri, 23 Feb 2024 14:11:36 -0600 Subject: [PATCH 14/38] Add environment variables for origin and sxlite contexts --- lite/kube/deployment.yaml | 8 ++++++++ lite/scripts/sXlite.py | 13 +++++++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/lite/kube/deployment.yaml b/lite/kube/deployment.yaml index dbd406c7c..61150fca2 100644 --- a/lite/kube/deployment.yaml +++ b/lite/kube/deployment.yaml @@ -22,6 +22,14 @@ spec: # command: ["sleep"] # args: ["infinity"] env: + - name: ORIGIN_CONTEXT + value: af-admin@af + - name: ORIGIN_NAMESPACE + value: servicex + - name: SXLITE_CONTEXT + value: kubernetes-admin@cluster.local + - name: SXLITE_NAMESPACE + value: servicex-lite - name: RMQ_USER value: "user" - name: RMQ_HOST diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index dc3a9baba..c9c646559 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -1,4 +1,5 @@ import os +import sys import time import base64 from kubernetes import config, 
dynamic @@ -41,7 +42,11 @@ def clean_metadata(self, obj): class sXorigin(cluster): - def __init__(self, context='af-admin@af', namespace='servicex') -> None: + def __init__(self) -> None: + context = os.environ.get('ORIGIN_CONTEXT') + namespace = os.environ.get('ORIGIN_NAMESPACE') + if not context or not namespace: + sys.exit(1) super().__init__(context) self.ns = namespace @@ -156,7 +161,11 @@ def patch_master(self): class sXlite(cluster): - def __init__(self, context='admin@river', namespace='servicex-lite') -> None: + def __init__(self) -> None: + context = os.environ.get('SXLITE_CONTEXT') + namespace = os.environ.get('SXLITE_NAMESPACE') + if not context or not namespace: + sys.exit(1) super().__init__(context) self.ns = namespace From 778c2a7dda03f9cb89199629bc264630ccbcd0c7 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Fri, 23 Feb 2024 16:02:34 -0600 Subject: [PATCH 15/38] Add print statements for reading and creating secrets, configmaps, and deployments --- lite/scripts/sXlite.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index c9c646559..e2117667a 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -51,11 +51,13 @@ def __init__(self) -> None: self.ns = namespace def read_secret(self, name): + print(f'reading secret: {name}') sec = self.secret_api.get(name=name, namespace=self.ns) self.clean_metadata(sec) return sec def read_configmap(self, name): + print(f'reading configmap: {name}') cm = self.cm_api.get(name=name, namespace=self.ns) self.clean_metadata(cm) return cm @@ -170,6 +172,7 @@ def __init__(self) -> None: self.ns = namespace def create_secret(self, secret): + print(f'creating secret: {secret.metadata.name}') try: secret = self.secret_api.create(body=secret, namespace=self.ns) print(f'created secret: {secret.metadata.name}') @@ -177,6 +180,7 @@ def create_secret(self, secret): print(f'conflict creating secret: {secret.metadata.name}') def delete_secret(self, name): + print(f'deleting secret: {name}') try: self.secret_api.delete(body={}, name=name, namespace=self.ns) print(f'deleted secret: {name}') @@ -184,6 +188,7 @@ def delete_secret(self, name): print('could not delete resource:', e.summary()) def create_configmap(self, cm): + print(f'creating configmap: {cm.metadata.name}') try: cm = self.cm_api.create(body=cm, namespace=self.ns) print(f'created configmap: {cm.metadata.name}') @@ -191,6 +196,7 @@ def create_configmap(self, cm): print(f'conflict creating configmap: {cm.metadata.name}') def delete_configmap(self, name): + print(f'deleting configmap: {name}') try: self.cm_api.delete(body={}, name=name, namespace=self.ns) print(f'deleted configmap: {name}') @@ -198,6 +204,7 @@ def delete_configmap(self, name): print(f'could not delete configmap:{name}', e) def create_deployment(self, dep): + print(f'creating deployment: {dep.metadata.name}') try: dep = self.deployment_api.create(body=dep, namespace=self.ns) print(f'created deployment: {dep.metadata.name}') @@ -205,6 +212,7 @@ def create_deployment(self, dep): print('conflict creating deployment:', e.summary()) def delete_deployment(self, name): + print(f'deleting deployment: {name}') try: self.deployment_api.delete(body={}, name=name, namespace=self.ns) print(f'deleted deployment: {name}') From eaa9d9fe20758297e6ddf464ab28c2b946769797 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Mon, 26 Feb 2024 16:46:24 -0600 Subject: [PATCH 16/38] f --- lite/scripts/sXlite.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index e2117667a..708773c77 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -204,7 +204,7 @@ def delete_configmap(self, name): print(f'could not delete configmap:{name}', e) def create_deployment(self, dep): - print(f'creating deployment: {dep.metadata.name}') + print(f'creating deployment: {dep}') try: dep = self.deployment_api.create(body=dep, namespace=self.ns) print(f'created deployment: {dep.metadata.name}') From 487f361e91ea28936e1bd8a62bf05fa8374af575 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Mon, 26 Feb 2024 16:50:51 -0600 Subject: [PATCH 17/38] typo fix --- lite/kube/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lite/kube/deployment.yaml b/lite/kube/deployment.yaml index 61150fca2..f1d000904 100644 --- a/lite/kube/deployment.yaml +++ b/lite/kube/deployment.yaml @@ -17,7 +17,7 @@ spec: spec: containers: - name: servicex-lite - image: sslhep/servicex-lite:latest + image: sslhep/servicex_lite:latest imagePullPolicy: Always # command: ["sleep"] # args: ["infinity"] From ac3e7346433a5af817b2f60f0ac36b7724935245 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Tue, 27 Feb 2024 08:50:38 -0600 Subject: [PATCH 18/38] requests and limits --- lite/scripts/sXlite.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 708773c77..1a2f6c22d 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -100,7 +100,16 @@ def get_deployment(self, req_id): "command": ["bash", "-c"], "env": [], "volumeMounts": [], - "resources": {"limits": {"cpu": "1"}}, + "resources": { + "requests": { + "memory": "2Gi", + "cpu": "250m" + }, + "limits": { + "memory": "4Gi", + "cpu": "1" + } + }, "args": [] }, { @@ -109,7 +118,16 @@ def get_deployment(self, req_id): "command": ["bash", "-c"], "env": [], "volumeMounts": [], - "resources": {"limits": {"cpu": "1"}}, + "resources": { + "requests": { + "memory": "2Gi", + "cpu": "250m" + }, + "limits": { + "memory": "4Gi", + "cpu": "1" + } + }, "args": ['-c'], } ], From e0adc6781f375f6776b69d99eb78b9f2c6a3c3f7 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Thu, 29 Feb 2024 11:13:48 -0600 Subject: [PATCH 19/38] more printouts --- lite/scripts/sXlite.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 1a2f6c22d..c182a8214 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -16,6 +16,7 @@ class cluster: def __init__(self, context) -> None: + print(f'initializing context:{context}') client = dynamic.DynamicClient( api_client.ApiClient(configuration=config.load_kube_config(context=context)) ) From 69203c7d18128d92bb766e3dfda23fc147fbe5e0 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Thu, 29 Feb 2024 12:32:01 -0600 Subject: [PATCH 20/38] one more decode --- lite/scripts/sXlite.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index c182a8214..a50c1d978 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -253,7 +253,7 @@ def start(): sec = sxo.read_secret('servicex-secrets') sxl.create_secret(sec) global rmq_pass - rmq_pass = base64.b64decode(sec.data['rabbitmq-password']) + rmq_pass = base64.b64decode(sec.data['rabbitmq-password']).decode() sec = sxo.read_secret('servicex-x509-proxy') sxl.create_secret(sec) From ead1d89c310038d378921880b4af7d8e530f1e28 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Fri, 1 
Mar 2024 11:31:25 -0600 Subject: [PATCH 21/38] xcache overwrite --- lite/kube/deployment.yaml | 4 ++-- lite/scripts/sXlite.py | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/lite/kube/deployment.yaml b/lite/kube/deployment.yaml index f1d000904..ce77bca17 100644 --- a/lite/kube/deployment.yaml +++ b/lite/kube/deployment.yaml @@ -19,8 +19,6 @@ spec: - name: servicex-lite image: sslhep/servicex_lite:latest imagePullPolicy: Always - # command: ["sleep"] - # args: ["infinity"] env: - name: ORIGIN_CONTEXT value: af-admin@af @@ -38,6 +36,8 @@ spec: value: "15" - name: PYTHONUNBUFFERED value: "true" + # - name: XCACHE only if you want to overwrite xcache + # value: "" volumeMounts: - name: kubeconfig mountPath: /home/sxlite/.kube/ diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index a50c1d978..0b1801a02 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -139,9 +139,13 @@ def get_deployment(self, req_id): } for e in c1.env: ta = {'name': e.name, 'value': e.value} + if "XCACHE" in os.environ and e.name == 'CACHE_PREFIX': + e.value = os.getenv('XCACHE') dep['spec']['template']['spec']['containers'][0]['env'].append(ta) for e in c2.env: ta = {'name': e.name, 'value': e.value} + if "XCACHE" in os.environ and e.name == 'CACHE_PREFIX': + e.value = os.getenv('XCACHE') dep['spec']['template']['spec']['containers'][1]['env'].append(ta) for e in c1.volumeMounts: From 6df04c590fcc45d5ff83dcc0cfd5d4af586a0095 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Fri, 1 Mar 2024 12:09:44 -0600 Subject: [PATCH 22/38] fix --- lite/scripts/sXlite.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 0b1801a02..1f5045060 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -140,12 +140,12 @@ def get_deployment(self, req_id): for e in c1.env: ta = {'name': e.name, 'value': e.value} if "XCACHE" in os.environ and e.name == 'CACHE_PREFIX': - e.value = os.getenv('XCACHE') + ta['value'] = os.getenv('XCACHE') dep['spec']['template']['spec']['containers'][0]['env'].append(ta) for e in c2.env: ta = {'name': e.name, 'value': e.value} if "XCACHE" in os.environ and e.name == 'CACHE_PREFIX': - e.value = os.getenv('XCACHE') + ta['value'] = os.getenv('XCACHE') dep['spec']['template']['spec']['containers'][1]['env'].append(ta) for e in c1.volumeMounts: From cfeda9930fbbde7dcaad1691dc96d7f98f0df6b7 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Mon, 11 Mar 2024 17:05:03 -0500 Subject: [PATCH 23/38] extra annotations --- lite/kube/deployment.yaml | 2 ++ lite/scripts/sXlite.py | 21 +++++++++++++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/lite/kube/deployment.yaml b/lite/kube/deployment.yaml index ce77bca17..0a20fcea1 100644 --- a/lite/kube/deployment.yaml +++ b/lite/kube/deployment.yaml @@ -38,6 +38,8 @@ spec: value: "true" # - name: XCACHE only if you want to overwrite xcache # value: "" + - name: EXTRA_ANNOTATIONS + value: "" volumeMounts: - name: kubeconfig mountPath: /home/sxlite/.kube/ diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 1f5045060..229932fb7 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -89,10 +89,13 @@ def get_deployment(self, req_id): "kind": "Deployment", "metadata": {"name": o.metadata.name}, "spec": { - "replicas": initial_pods, + "replicas": 0, "selector": {"matchLabels": {'app': o.spec.selector.matchLabels.app}}, "template": { - "metadata": {"labels": {"app": o.spec.template.metadata.labels.app}}, + 
"metadata": { + "labels": {"app": o.spec.template.metadata.labels.app}, + "annotations": {} + }, "spec": { "containers": [ { @@ -227,7 +230,21 @@ def delete_configmap(self, name): print(f'could not delete configmap:{name}', e) def create_deployment(self, dep): + dep['spec']['replicas'] = initial_pods + + ea = os.getenv("EXTRA_ANNOTATIONS", "") + if ea: + ea_key = ea.split(":")[0].strip() + ea_val = ea.split(":")[1].strip() + dep['spec']['template']['metadata']['annotations'][ea_key] = ea_val + + dep['spec']['template']['spec']['containers'][0]['env']['site'] = os.environ.get( + 'SXLITE_CONTEXT') + dep['spec']['template']['spec']['containers'][1]['env']['site'] = os.environ.get( + 'SXLITE_CONTEXT') + print(f'creating deployment: {dep}') + try: dep = self.deployment_api.create(body=dep, namespace=self.ns) print(f'created deployment: {dep.metadata.name}') From d3bf104ac72b2f53ef3fda52a80dec43eeed021f Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Tue, 12 Mar 2024 11:09:06 -0500 Subject: [PATCH 24/38] fix --- lite/scripts/sXlite.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 229932fb7..20ff0197c 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -238,10 +238,9 @@ def create_deployment(self, dep): ea_val = ea.split(":")[1].strip() dep['spec']['template']['metadata']['annotations'][ea_key] = ea_val - dep['spec']['template']['spec']['containers'][0]['env']['site'] = os.environ.get( - 'SXLITE_CONTEXT') - dep['spec']['template']['spec']['containers'][1]['env']['site'] = os.environ.get( - 'SXLITE_CONTEXT') + site = {'site': os.environ.get('SXLITE_CONTEXT')} + dep['spec']['template']['spec']['containers'][0]['env'].append(site) + dep['spec']['template']['spec']['containers'][1]['env'].append(site) print(f'creating deployment: {dep}') From b6bb74cc2801fb72d6142d3e164817a6415583fb Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Tue, 12 Mar 2024 11:39:00 -0500 Subject: [PATCH 25/38] fix1 --- lite/scripts/sXlite.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 20ff0197c..157d2bd14 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -238,7 +238,7 @@ def create_deployment(self, dep): ea_val = ea.split(":")[1].strip() dep['spec']['template']['metadata']['annotations'][ea_key] = ea_val - site = {'site': os.environ.get('SXLITE_CONTEXT')} + site = {"name": "site", "value": os.environ.get('SXLITE_CONTEXT')} dep['spec']['template']['spec']['containers'][0]['env'].append(site) dep['spec']['template']['spec']['containers'][1]['env'].append(site) From 8c48200fe3374a992464bb814ef07da49c916d67 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 13 Mar 2024 12:39:39 -0500 Subject: [PATCH 26/38] adding HPA --- lite/kube/deployment.yaml | 2 ++ lite/scripts/sXlite.py | 53 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 54 insertions(+), 1 deletion(-) diff --git a/lite/kube/deployment.yaml b/lite/kube/deployment.yaml index 0a20fcea1..c8f83df48 100644 --- a/lite/kube/deployment.yaml +++ b/lite/kube/deployment.yaml @@ -34,6 +34,8 @@ spec: value: "192.170.241.253" - name: INITIAL_PODS value: "15" + - name: MAX_PODS + value: "1000" - name: PYTHONUNBUFFERED value: "true" # - name: XCACHE only if you want to overwrite xcache diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 157d2bd14..82850274a 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -12,6 +12,7 @@ rmq_user = 
os.getenv("RMQ_USER", 'user') rmq_host = os.getenv("RMQ_HOST", '192.170.241.253') initial_pods = int(os.getenv("INITIAL_PODS", '15')) +max_pods = int(os.getenv("MAX_PODS", '1000')) class cluster: @@ -25,6 +26,8 @@ def __init__(self, context) -> None: self.secret_api = client.resources.get(api_version="v1", kind="Secret") self.deployment_api = client.resources.get(api_version="apps/v1", kind="Deployment") self.cm_api = client.resources.get(api_version="v1", kind="ConfigMap") + self.hpa_api = client.resources.get( + api_version="autoscaling/v2", kind="HorizontalPodAutoscaler") def getNodes(self): for item in self.node_api.get().items: @@ -258,6 +261,52 @@ def delete_deployment(self, name): except Exception as e: print(f'could not delete deployment:{name}', e) + def create_hpa(self, name): + print(f'creating hpa: {name}') + hpa = { + "apiVersion": "autoscaling/v2", + "kind": "HorizontalPodAutoscaler", + "metadata": { + "name": name, + "namespace": self.ns, + }, + "spec": { + "scaleTargeRef": { + "kind": "Deployment", + "name": f"transformer-{name}", + "apiVersion": "apps/v1" + }, + "minReplicas": initial_pods, + "maxReplicas": max_pods, + "metrics": [ + { + "type": "Resource", + "resource": { + "name": "cpu", + "target": { + "type": "Utilization", + "averageUtilization": 30 + } + } + } + + ] + } + } + try: + secret = self.hpa_api.create(body=hpa, namespace=self.ns) + print(f'created secret: {name}') + except exceptions.ConflictError: + print(f'conflict creating secret: {name}') + + def delete_hpa(self, name): + print(f'deleting HPA: {name}') + try: + self.hpa_api.delete(body={}, name=name, namespace=self.ns) + print(f'deleted hpa: {name}') + except Exception as e: + print(f'could not delete hpa:{name}', e) + if __name__ == '__main__': @@ -282,7 +331,7 @@ def start(): sxl = sXlite() sxo.patch_master() - # cleanup() + cleanup() start() count = 0 @@ -293,9 +342,11 @@ def start(): sxl.create_deployment(d) cm = sxo.read_configmap(f'{req_id}-generated-source') sxl.create_configmap(cm) + sxl.create_hpa(req_id) requests['active'].append(req_id) for req_id in requests['unknown']: + sxl.delete_deployment(req_id) sxl.delete_configmap(f'{req_id}-generated-source') sxl.delete_deployment(f'transformer-{req_id}') From 95967d973835923c8a8a9838a943f4dc49979cb9 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 13 Mar 2024 13:02:18 -0500 Subject: [PATCH 27/38] typo --- lite/scripts/sXlite.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 82850274a..93312c388 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -271,10 +271,9 @@ def create_hpa(self, name): "namespace": self.ns, }, "spec": { - "scaleTargeRef": { + "scaleTargetRef": { "kind": "Deployment", - "name": f"transformer-{name}", - "apiVersion": "apps/v1" + "name": f"transformer-{name}" }, "minReplicas": initial_pods, "maxReplicas": max_pods, From ee230b2955a7a5a36f714860bda3719020a16f58 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 13 Mar 2024 13:40:37 -0500 Subject: [PATCH 28/38] copy/paste error --- lite/scripts/sXlite.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 93312c388..b6e942144 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -345,7 +345,7 @@ def start(): requests['active'].append(req_id) for req_id in requests['unknown']: - sxl.delete_deployment(req_id) + sxl.delete_hpa(req_id) sxl.delete_configmap(f'{req_id}-generated-source') 
sxl.delete_deployment(f'transformer-{req_id}') From 724c9ccb64c0dda4a21e99bc823fe5a20bb73e6f Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 13 Mar 2024 14:28:51 -0500 Subject: [PATCH 29/38] add api version --- lite/scripts/sXlite.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index b6e942144..91d87aafc 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -273,7 +273,8 @@ def create_hpa(self, name): "spec": { "scaleTargetRef": { "kind": "Deployment", - "name": f"transformer-{name}" + "name": f"transformer-{name}", + "apiVersion": "apps/v1" }, "minReplicas": initial_pods, "maxReplicas": max_pods, From c54d66282f81d18e0f51efb21f2c44896a31ab67 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 13 Mar 2024 15:48:15 -0500 Subject: [PATCH 30/38] fixing prinout, hpa version --- lite/kube/deployment.yaml | 2 ++ lite/scripts/sXlite.py | 10 ++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/lite/kube/deployment.yaml b/lite/kube/deployment.yaml index c8f83df48..3aa406e31 100644 --- a/lite/kube/deployment.yaml +++ b/lite/kube/deployment.yaml @@ -42,6 +42,8 @@ spec: # value: "" - name: EXTRA_ANNOTATIONS value: "" + - name: HPA_VERSION + value: "v2" volumeMounts: - name: kubeconfig mountPath: /home/sxlite/.kube/ diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 91d87aafc..023a9f969 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -14,6 +14,8 @@ initial_pods = int(os.getenv("INITIAL_PODS", '15')) max_pods = int(os.getenv("MAX_PODS", '1000')) +hpa_version = os.getenv("HPA_VERSION", 'v2') + class cluster: def __init__(self, context) -> None: @@ -264,7 +266,7 @@ def delete_deployment(self, name): def create_hpa(self, name): print(f'creating hpa: {name}') hpa = { - "apiVersion": "autoscaling/v2", + "apiVersion": f"autoscaling/{hpa_version}", "kind": "HorizontalPodAutoscaler", "metadata": { "name": name, @@ -294,10 +296,10 @@ def create_hpa(self, name): } } try: - secret = self.hpa_api.create(body=hpa, namespace=self.ns) - print(f'created secret: {name}') + self.hpa_api.create(body=hpa, namespace=self.ns) + print(f'created hpa: {name}') except exceptions.ConflictError: - print(f'conflict creating secret: {name}') + print(f'conflict creating hpa: {name}') def delete_hpa(self, name): print(f'deleting HPA: {name}') From c26b619a7c8be22bd1727c52ac6bfbe192db1f8b Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 13 Mar 2024 15:58:54 -0500 Subject: [PATCH 31/38] more changes --- lite/scripts/sXlite.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 023a9f969..f4c0af14e 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -20,16 +20,14 @@ class cluster: def __init__(self, context) -> None: print(f'initializing context:{context}') - client = dynamic.DynamicClient( + self.client = dynamic.DynamicClient( api_client.ApiClient(configuration=config.load_kube_config(context=context)) ) - # self.service_api = client.resources.get(api_version="v1", kind="Service") - self.node_api = client.resources.get(api_version="v1", kind="Node") - self.secret_api = client.resources.get(api_version="v1", kind="Secret") - self.deployment_api = client.resources.get(api_version="apps/v1", kind="Deployment") - self.cm_api = client.resources.get(api_version="v1", kind="ConfigMap") - self.hpa_api = client.resources.get( - api_version="autoscaling/v2", kind="HorizontalPodAutoscaler") 
+ # self.service_api = self.client.resources.get(api_version="v1", kind="Service") + self.node_api = self.client.resources.get(api_version="v1", kind="Node") + self.secret_api = self.client.resources.get(api_version="v1", kind="Secret") + self.deployment_api = self.client.resources.get(api_version="apps/v1", kind="Deployment") + self.cm_api = self.client.resources.get(api_version="v1", kind="ConfigMap") def getNodes(self): for item in self.node_api.get().items: @@ -201,6 +199,8 @@ def __init__(self) -> None: sys.exit(1) super().__init__(context) self.ns = namespace + self.hpa_api = self.client.resources.get( + api_version=f"autoscaling/{hpa_version}", kind="HorizontalPodAutoscaler") def create_secret(self, secret): print(f'creating secret: {secret.metadata.name}') From b0603b6a595e11f29e8d3f20fbeaeaf51662f7e1 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Wed, 13 Mar 2024 16:43:27 -0500 Subject: [PATCH 32/38] make hpa cpu utilization configurable --- lite/kube/deployment.yaml | 2 ++ lite/scripts/sXlite.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lite/kube/deployment.yaml b/lite/kube/deployment.yaml index 3aa406e31..0c8287306 100644 --- a/lite/kube/deployment.yaml +++ b/lite/kube/deployment.yaml @@ -44,6 +44,8 @@ spec: value: "" - name: HPA_VERSION value: "v2" + - name: HPA_CPU_UTILIZATION + value: "10" volumeMounts: - name: kubeconfig mountPath: /home/sxlite/.kube/ diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index f4c0af14e..a555cfd6f 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -15,6 +15,7 @@ max_pods = int(os.getenv("MAX_PODS", '1000')) hpa_version = os.getenv("HPA_VERSION", 'v2') +hpa_cpu_utilization = int(os.getenv("HPA_CPU_UTILIZATION", '10')) class cluster: @@ -287,7 +288,7 @@ def create_hpa(self, name): "name": "cpu", "target": { "type": "Utilization", - "averageUtilization": 30 + "averageUtilization": hpa_cpu_utilization } } } From f2f8dae6662cf96b0c05f06ed8b735d142d7c21a Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Mon, 25 Mar 2024 14:49:29 -0500 Subject: [PATCH 33/38] adding env for k8s node name --- lite/scripts/sXlite.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index a555cfd6f..71004b2fb 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -106,7 +106,13 @@ def get_deployment(self, req_id): "name": c1.name, "image": c1.image, "command": ["bash", "-c"], - "env": [], + "env": [ + {"name": "K8S_NODE", + "valueFrom": { + "fieldRef": {"fieldPath": "spec.nodeName"} + } + } + ], "volumeMounts": [], "resources": { "requests": { @@ -124,7 +130,13 @@ def get_deployment(self, req_id): "name": c2.name, "image": c2.image, "command": ["bash", "-c"], - "env": [], + "env": [ + {"name": "K8S_NODE", + "valueFrom": { + "fieldRef": {"fieldPath": "spec.nodeName"} + } + } + ], "volumeMounts": [], "resources": { "requests": { From a49dfee8ce8e1e82b526bbca2f15161ff9700e46 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Mon, 25 Mar 2024 15:35:38 -0500 Subject: [PATCH 34/38] host_name --- lite/scripts/sXlite.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 71004b2fb..a8e8e491a 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -131,7 +131,7 @@ def get_deployment(self, req_id): "image": c2.image, "command": ["bash", "-c"], "env": [ - {"name": "K8S_NODE", + {"name": "HOST_NAME", "valueFrom": { "fieldRef": {"fieldPath": 
"spec.nodeName"} } @@ -157,11 +157,15 @@ def get_deployment(self, req_id): } } for e in c1.env: + if e.name == 'HOST_NAME': + continue ta = {'name': e.name, 'value': e.value} if "XCACHE" in os.environ and e.name == 'CACHE_PREFIX': ta['value'] = os.getenv('XCACHE') dep['spec']['template']['spec']['containers'][0]['env'].append(ta) for e in c2.env: + if e.name == 'HOST_NAME': + continue ta = {'name': e.name, 'value': e.value} if "XCACHE" in os.environ and e.name == 'CACHE_PREFIX': ta['value'] = os.getenv('XCACHE') From 16004e090f0c59be856c9dc4b8ff27a8d3eb38f5 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Mon, 25 Mar 2024 16:01:11 -0500 Subject: [PATCH 35/38] fix --- lite/scripts/sXlite.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index a8e8e491a..0debbdf23 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -107,7 +107,7 @@ def get_deployment(self, req_id): "image": c1.image, "command": ["bash", "-c"], "env": [ - {"name": "K8S_NODE", + {"name": "HOST_NAME", "valueFrom": { "fieldRef": {"fieldPath": "spec.nodeName"} } From 83bc4d56de203dc4c74cac8d5a10ed05d60ca25d Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Fri, 5 Apr 2024 14:09:20 -0500 Subject: [PATCH 36/38] cleaner args --- lite/scripts/sXlite.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 0debbdf23..da17204e9 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -105,7 +105,7 @@ def get_deployment(self, req_id): { "name": c1.name, "image": c1.image, - "command": ["bash", "-c"], + "command": o.spec.template.spec.containers[0].command, "env": [ {"name": "HOST_NAME", "valueFrom": { @@ -124,12 +124,11 @@ def get_deployment(self, req_id): "cpu": "1" } }, - "args": [] }, { "name": c2.name, "image": c2.image, - "command": ["bash", "-c"], + "command": o.spec.template.spec.containers[1].command, "env": [ {"name": "HOST_NAME", "valueFrom": { @@ -148,7 +147,7 @@ def get_deployment(self, req_id): "cpu": "1" } }, - "args": ['-c'], + "args": o.spec.template.spec.containers[1].args } ], "volumes": [] @@ -190,14 +189,12 @@ def get_deployment(self, req_id): vo['configMap'] = {'name': cm.name, 'defaultMode': cm.defaultMode} dep['spec']['template']['spec']['volumes'].append(vo) - sidecar_args = [ - f'PYTHONPATH=/servicex/transformer_sidecar:$PYTHONPATH python /servicex/transformer_sidecar/transformer.py --request-id {req_id} --rabbit-uri amqp://{rmq_user}:{rmq_pass}@{rmq_host}:5672/%2F?heartbeat=9000 --result-destination object-store --result-format root-file' - ] - transformer_args = [ - f'until [ -f /servicex/output/scripts/proxy-exporter.sh ];do sleep 5;done && /servicex/output/scripts/proxy-exporter.sh & sleep 5 && cp /generated/transformer_capabilities.json /servicex/output && PYTHONPATH=/generated:$PYTHONPATH bash /servicex/output/scripts/watch.sh python /generated/transform_single_file.py /servicex/output/{req_id}' - ] - dep['spec']['template']['spec']['containers'][0]['args'] = sidecar_args - dep['spec']['template']['spec']['containers'][1]['args'] = transformer_args + sidecar_arg = o['spec']['template']['spec']['containers'][0]['args'][0] + + # replace RMQ address in sidecar_args + to_replace = sidecar_arg[s.index('amqp://')+7: s.index(':5672')] + sidecar_arg = sidecar_arg.replace(to_replace, f'{rmq_user}:{rmq_pass}@{rmq_host}') + dep['spec']['template']['spec']['containers'][0]['args'] = [sidecar_arg] print('=========================') # 
print(dep) From 69ff57d3e50e536e9d6aac3a3ae325ddc4512c96 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Fri, 5 Apr 2024 14:25:58 -0500 Subject: [PATCH 37/38] typo --- lite/scripts/sXlite.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index da17204e9..1df206c57 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -192,7 +192,7 @@ def get_deployment(self, req_id): sidecar_arg = o['spec']['template']['spec']['containers'][0]['args'][0] # replace RMQ address in sidecar_args - to_replace = sidecar_arg[s.index('amqp://')+7: s.index(':5672')] + to_replace = sidecar_arg[sidecar_arg.index('amqp://')+7: sidecar_arg.index(':5672')] sidecar_arg = sidecar_arg.replace(to_replace, f'{rmq_user}:{rmq_pass}@{rmq_host}') dep['spec']['template']['spec']['containers'][0]['args'] = [sidecar_arg] From 26240fe69cbd8a52c8cc4696c46f5e3674e29620 Mon Sep 17 00:00:00 2001 From: Ilija Vukotic Date: Mon, 15 Apr 2024 10:10:56 -0500 Subject: [PATCH 38/38] "imagePullPolicy": "Always", --- lite/scripts/sXlite.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py index 1df206c57..7cc5935f3 100644 --- a/lite/scripts/sXlite.py +++ b/lite/scripts/sXlite.py @@ -113,6 +113,7 @@ def get_deployment(self, req_id): } } ], + "imagePullPolicy": "Always", "volumeMounts": [], "resources": { "requests": { @@ -136,6 +137,7 @@ def get_deployment(self, req_id): } } ], + "imagePullPolicy": "Always", "volumeMounts": [], "resources": { "requests": {
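The "cleaner args" commits near the end of the series stop hand-building the sidecar command line and instead copy it from the origin deployment, splicing the lite cluster's RabbitMQ credentials into the embedded `amqp://` URI. A minimal, standalone sketch of that substitution, assuming the argument contains a URI of the form `amqp://user:pass@host:5672/...` (the function name and example values are illustrative, not from the patches):

```python
def rewrite_rabbit_uri(sidecar_arg: str, rmq_user: str, rmq_pass: str, rmq_host: str) -> str:
    """Swap the user:pass@host portion of an amqp:// URI embedded in a longer argument string."""
    start = sidecar_arg.index('amqp://') + len('amqp://')
    end = sidecar_arg.index(':5672')
    to_replace = sidecar_arg[start:end]  # e.g. 'user:oldpass@oldhost'
    return sidecar_arg.replace(to_replace, f'{rmq_user}:{rmq_pass}@{rmq_host}')


# Example with illustrative values:
arg = '... --rabbit-uri amqp://user:secret@10.0.0.1:5672/%2F?heartbeat=9000 ...'
print(rewrite_rabbit_uri(arg, 'user', 'new-pass', '192.170.241.253'))
```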