diff --git a/.github/workflows/deploy-config.json b/.github/workflows/deploy-config.json index 8e8b83f70..e436c511f 100644 --- a/.github/workflows/deploy-config.json +++ b/.github/workflows/deploy-config.json @@ -48,5 +48,10 @@ "dir_name": "minio_cleanup", "image_name": "servicex_minio_cleanup", "test_required": false + }, + { + "dir_name": "lite", + "image_name": "servicex_lite", + "test_required": false } ] diff --git a/docs/deployment/basic.md b/docs/deployment/basic.md index a6de0d283..5ed78f33c 100644 --- a/docs/deployment/basic.md +++ b/docs/deployment/basic.md @@ -57,8 +57,8 @@ servicex --namespace init --cert-dir ~/.globus ``` By default, this will look for the certificates in your `~/.globus` directory. -You can pass another directory with the `--cert-dir` argument. It is assumed -that they are named `usercert.pem` and `userkey.pem`. You will be prompted for +You can pass another directory with the `--cert-dir` argument. It is assumed +that they are named `usercert.pem` and `userkey.pem`. You will be prompted for the passphrase that secures your X509 private key. The installed secrets can be used by any ServiceX instance deployed into the @@ -141,7 +141,7 @@ helm install -f values.yaml --version v1.0.0-rc.3 servicex ssl-hep/servicex Initial deployment is typically rapid, with RabbitMQ requiring up to a minute to complete its initialization. The `servicex` argument is used by helm as the release -name. It is used to refer to the chart when deploying, insptacting, or deleting +name. It is used to refer to the chart when deploying, inspecting, or deleting the chart. After this all the pods of the new deployment should be ready. 
You can check the status of the pods via diff --git a/docs/deployment/production.md b/docs/deployment/production.md index 1592fb924..66ca0f1e8 100644 --- a/docs/deployment/production.md +++ b/docs/deployment/production.md @@ -1,29 +1,31 @@ # ServiceX in production -This guide is aimed at those interested in making production ServiceX +This guide is aimed at those interested in making production ServiceX deployments that are publicly accessible and require authentication. -For a guide to making a simple deployment of ServiceX with no extra features, +For a guide to making a simple deployment of ServiceX with no extra features, check out our [basic deployment guide](basic.md). ## Prerequisites -- A Kubernetes cluster running K8s version 1.16 or later + +- A Kubernetes cluster running K8s version 1.16 or later with an ingress controller such as NGINX. - [Helm 3](https://helm.sh/docs/intro/install/) installed. -- You've used the ServiceX CLI to install your grid certificates on +- You've used the ServiceX CLI to install your grid certificates on your cluster (if not, see the basic guide). -- An initial `values.yaml` file for making working ServiceX deployments, +- An initial `values.yaml` file for making working ServiceX deployments, such as the one in the basic guide. ## External access -It's easy to deploy a ServiceX instance with no external access, but this +It's easy to deploy a ServiceX instance with no external access, but this is of limited value. We will now update `values.yaml` to add external ingress. ### Adding an Ingress to the ServiceX app -Configure an Ingress resource for the ServiceX API server by adding the +Configure an Ingress resource for the ServiceX API server by adding the following section to your values file: + ```yaml app: ingress: @@ -32,34 +34,39 @@ app: host: ``` -The ServiceX API server will be located at a subdomain of the domain name -given in `app.ingress.host`. 
-The name of the subdomain will match the Helm release name +The ServiceX API server will be located at a subdomain of the domain name +given in `app.ingress.host`. +The name of the subdomain will match the Helm release name (the first position argument provided with the `helm install` command), so the full URL will be `.`. For example, if your values file contains: + ```yaml app: ingress: enabled: true host: servicex.ssl-hep.org ``` + and you deployed the helm chart with + ``` helm install -f values.yaml --version v1.0.0-rc.3 my-release ssl-hep/servicex ``` + then the instance's URL would be `my-release.servicex.ssl-hep.org`. -You should also make sure the host has a DNS A record pointing this +You should also make sure the host has a DNS A record pointing this subdomain at the external IP address of your ingress controller. -The `app.ingress.class` value is used to set the `kubernetes.io/ingress.class` -annotation on the Ingress resource. It defaults to `nginx`, but you can set a +The `app.ingress.class` value is used to set the `kubernetes.io/ingress.class` +annotation on the Ingress resource. It defaults to `nginx`, but you can set a different value, such as `traefik`. ### Adding an Ingress to Minio -ServiceX stores files in a Minio object store which is deployed as a + +ServiceX stores files in a Minio object store which is deployed as a subchart. The Helm chart for Minio has its own support for an Ingress, which we can activate like so: @@ -72,17 +79,19 @@ minio: hostname: my-release-minio.servicex.ssl-hep.org ``` -Unlike the ServiceX Ingress, the subchart doesn't know the name of our +Unlike the ServiceX Ingress, the subchart doesn't know the name of our deployment, so you need to hardcode it in the Minio Ingress host -(this is a current limitation of the Minio chart). +(this is a current limitation of the Minio chart). The value should be `-minio.`. 
### Ingress at CERN k8s cluster -For ingress to work at CERN, one needs at least two loadbalancers allowed for your project. + +For ingress to work at CERN, one needs at least two loadbalancers allowed for your project. CERN documentation is [here](https://clouddocs.web.cern.ch/networking/load_balancing.html#kubernetes-service-type-loadbalancer). Start by turning off the charts ingress: + ```yaml app: ingress: @@ -90,6 +99,7 @@ app: ``` Create loadbalancer service like this: + ```yaml apiVersion: v1 kind: ServiceX @@ -109,6 +119,7 @@ spec: ``` Verify that you can access it using just IP, then create a DNS for it: + ``` openstack loadbalancer set --description my-domain-name ServiceX-LB ping my-domain-name.cern.ch @@ -118,32 +129,35 @@ Once service is accessible from inside CERN, you may ask for the firewall to be The procedure should be repeated for MinIO. ## Configuring Ingress resources to use TLS + It's a good idea to enable TLS for both of these Ingress resources. There are two ways to do this: you can either obtain certificates and -install the TLS Secrets manually, or you can use the -[cert-manager](https://cert-manager.io/docs/) Kubernetes add-on to +install the TLS Secrets manually, or you can use the +[cert-manager](https://cert-manager.io/docs/) Kubernetes add-on to issue certificates and create the Secrets automatically. Separate guides for both options are provided below. Either way, the first step is to set `app.ingress.tls.enabled` to `true`. ### Without cert-manager -First, obtain a TLS certificate and private key for each Ingress + +First, obtain a TLS certificate and private key for each Ingress (two pairs in total). -This can be done using a trusted Certificate Authority (CA), such as +This can be done using a trusted Certificate Authority (CA), such as [Let's Encrypt](https://letsencrypt.org/). 
-Make sure that each certificate Common Name matches the hostname of the +Make sure that each certificate Common Name matches the hostname of the corresponding Ingress. Once you have your certs, you can install them to your cluster as [TLS Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets): `kubectl create secret tls --cert= --key=` -By default, the ServiceX chart looks for a Secret named -`-app-tls`. You can specify a different name in your values +By default, the ServiceX chart looks for a Secret named +`-app-tls`. You can specify a different name in your values using `app.ingress.tls.secretName`. Your final values should look something like: + ```yaml app: ingress: @@ -156,6 +170,7 @@ app: Adding TLS to the Minio subchart is slightly different. The configuration is as follows: + ```yaml minio: apiIngress: @@ -169,24 +184,27 @@ minio: - my-release-minio.servicex.ssl-hep.org secretName: my-release-minio-tls ``` -Remember to replace `my-release` and `servicex.ssl-hep.org` with your Helm release name and app ingress host, respectively. + +Remember to replace `my-release` and `servicex.ssl-hep.org` with your Helm release name and app ingress host, respectively. Here, you must specify a secret name; there is no default. ### With cert-manager + Alternately, you can let cert-manager handle the TLS certificates. To use it, complete the following steps: - [Install cert-manager](https://cert-manager.io/docs/installation/kubernetes/) on your cluster if it's not already installed. -- Deploy one or more ClusterIssuers, or check that one is already present. +- Deploy one or more ClusterIssuers, or check that one is already present. The Let's Encrypt staging and production ClusterIssuers are recommended. - In `values.yaml`, set `app.ingress.tls.clusterIssuer` to the name of the ClusterIssuer you'd like to use (e.g. `letsencrypt-prod`). 
-Browsers will trust `letsencrypt-prod` automatically, but bear in mind that +Browsers will trust `letsencrypt-prod` automatically, but bear in mind that it's subject to [rate limits](https://letsencrypt.org/docs/rate-limits/), so it's best to use `letsencrypt-staging` for development. Your values should now look like: + ```yaml app: ingress: @@ -198,6 +216,7 @@ app: For more information, see the cert-manager [guide to securing nginx-ingress](https://cert-manager.io/docs/tutorials/acme/ingress). To enable TLS for Minio, use the following configuration: + ```yaml minio: apiIngress: @@ -213,31 +232,34 @@ minio: - my-release-minio.servicex.ssl-hep.org secretName: my-release-minio-tls ``` -Once again, remember to replace `my-release` and `servicex.ssl-hep.org` with + +Once again, remember to replace `my-release` and `servicex.ssl-hep.org` with your Helm release name and app ingress host, respectively. ## Securing the deployment with authentication + If you wish, you could deploy these values and have a ServiceX instance that is not secured but is reachable via the public URL. -This is okay for a sneak peek, but not recommended for long-lived deployments, +This is okay for a sneak peek, but not recommended for long-lived deployments, since your grid certs will be usable by anyone on the Internet. -To prevent this, ServiceX supports an authentication system which requires -new users to create accounts with your ServiceX deployment by authenticating -to Globus with the identity provider of their choice +To prevent this, ServiceX supports an authentication system which requires +new users to create accounts with your ServiceX deployment by authenticating +to Globus with the identity provider of their choice (such as CERN or their university). - ### Setting up Globus Auth + Globus Auth requires your deployment to be served over HTTPS, so make sure you have completed the TLS section above. 
-Visit [developers.globus.org](https://developers.globus.org) -and select ___Register your app with Globus___. -Create a project for ServiceX and within that project click on +Visit [developers.globus.org](https://developers.globus.org) +and select ___Register your app with Globus___. +Create a project for ServiceX and within that project click on ___Add new app___. The app name can be whatever you like. The scopes should include: + ``` openid email @@ -253,18 +275,21 @@ If you want to use port-forwarding, also include Save the record. Copy the Client ID and paste this into your `values.yaml`. + ```yaml app: globusClientID: ``` Generate a Client Secret and paste this value into `values.yaml` as well: + ```yaml app: globusClientSecret: ``` Finally, you can enable authentication in `values.yaml`: + ```yaml app: auth: true @@ -272,43 +297,46 @@ app: ``` The system works as follows: -- New users will be required to create accounts with their Globus logins. -- New accounts will be pending, and cannot make requests until approved. + +- New users will be required to create accounts with their Globus logins. +- New accounts will be pending, and cannot make requests until approved. - Accounts must be approved by a ServiceX admin. -- To bootstrap the initial admin account, you must set `app.adminEmail` +- To bootstrap the initial admin account, you must set `app.adminEmail` to the email address associated with the administrator's Globus account. ### Approving new accounts from Slack -ServiceX can send notifications of new user registrations to the Slack channel -of your choice and allow administrators to approve pending users directly from -Slack. -This is strongly recommended for convenience, as currently the only other way -to approve accounts is to manually send HTTP requests to the API server via + +ServiceX can send notifications of new user registrations to the Slack channel +of your choice and allow administrators to approve pending users directly from +Slack. 
+This is strongly recommended for convenience, as currently the only other way +to approve accounts is to manually send HTTP requests to the API server via a tool like Postman or curl. -To set this up, complete the following steps **before deploying** ServiceX: +To set this up, complete the following steps __before deploying__ ServiceX: -- Create a secure Slack channel in your workspace (suggested name: `#servicex-signups`), accessible only to developers or administrators of ServiceX. -- Go to https://api.slack.com/apps and click **Create New App**. -Fill in ServiceX as the name and choose your workspace. -If you are going to make multiple ServiceX deployments, +- Create a secure Slack channel in your workspace (suggested name: `#servicex-signups`), accessible only to developers or administrators of ServiceX. +- Go to and click __Create New App__. +Fill in ServiceX as the name and choose your workspace. +If you are going to make multiple ServiceX deployments, you may want a more descriptive name, such as "ServiceX xAOD". -- Scroll down to the App Credentials section and find your Signing Secret. +- Scroll down to the App Credentials section and find your Signing Secret. Copy this value and place it in your values file as `app.slackSigningSecret`. - Scroll up to the feature list, click on Incoming Webhooks, and click the switch to turn them on. -- Click the **Add New Webhook to Workspace** button at the bottom, choose your signups channel, and click the **Allow** button. +- Click the __Add New Webhook to Workspace__ button at the bottom, choose your signups channel, and click the __Allow__ button. - Copy the Webhook URL and store it in your values under `app.newSignupWebhook`. - After completing the rest of the configuration, deploy ServiceX. - Go back to the [Slack App dashboard](https://api.slack.com/apps) and choose the app you created earlier. In the sidebar, click on Interactivity & Shortcuts under Features. -- Click the switch to turn Interactivity on. 
In the Request URL field, enter the base URL for the ServiceX API, followed by `/slack`, e.g. +- Click the switch to turn Interactivity on. In the Request URL field, enter the base URL for the ServiceX API, followed by `/slack`, e.g. `https://my-release.servicex.ssl-hep.org/slack`. Save your changes. - You're all set! ServiceX will now send interactive Slack notifications to your signups channel whenever a new user registers. ### Email Notifications -ServiceX can send email notifications to newly registered users via -[Mailgun](https://www.mailgun.com/) once their access has been approxed by an -administrator. To enable this, obtain a Mailgun API key and -[verified domain](https://documentation.mailgun.com/en/latest/quickstart-sending.html#verify-your-domain) + +ServiceX can send email notifications to newly registered users via +[Mailgun](https://www.mailgun.com/) once their access has been approved by an +administrator. To enable this, obtain a Mailgun API key and +[verified domain](https://documentation.mailgun.com/en/latest/quickstart-sending.html#verify-your-domain) and set `app.mailgunApiKey` and `app.mailgunDomain` in your values file`. ## Scaling @@ -326,11 +354,14 @@ rabbitmq: cpu: 100m replicas: 3 ``` + ## Using SealedSecrets to Keep All Config In GitHub + We use Bitnami's Sealed Secrets Controller to allow us to check all of the -config into GitHub. +config into GitHub. Install sealed secrets helm chart + ```bash helm repo add sealed-secrets https://bitnami-labs.github.io/sealed-secrets helm install sealed-secrets --namespace kube-system sealed-secrets/sealed-secrets @@ -339,8 +370,9 @@ Install sealed secrets helm chart You will need the `kubeseal` command on your computer. Follow instructions for [the various options](https://github.com/bitnami-labs/sealed-secrets#homebrew) -Create a secrets file using the [example_secrets.yaml](../example_secrets.yaml). 
-Encrypt it using kubeseal with +Create a secrets file using the [example_secrets.yaml](../example_secrets.yaml). +Encrypt it using kubeseal with + ```console cat deployed_values/dev-secrets.yaml | kubeseal --controller-namespace kube-system --controller-name sealed-secrets --format yaml > deployed_values/dev-sealed-secrets.yaml ``` @@ -350,6 +382,7 @@ that file into the cluster, it will be unsealed and turned into a plaintext secr that can be mounted into the App's deployment as env vars. ## Autoscaling + ServiceX should automatically scale up/down number of transformers. For this to work it uses Horizontal Pod Autoscaler (HPA). For the HPA to work, k8s cluster needs to be able to measure CPU utilization of the pods. This is easiest enabled by installing [metric-server](https://github.com/kubernetes-sigs/metrics-server). The latest one is easily installed and supports up to 100 nodes by default: ```bash diff --git a/docs/deployment/reference.md b/docs/deployment/reference.md index bf0d0097c..63593dd97 100644 --- a/docs/deployment/reference.md +++ b/docs/deployment/reference.md @@ -6,106 +6,111 @@ The following table lists the configurable parameters of the ServiceX chart and their default values. Note that you may also wish to change some of the default parameters for the [rabbitMQ](https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq) or [minio](https://github.com/minio/charts) subcharts. -| Parameter | Description | Default | +| Parameter | Description | Default | |--------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------| -| `secrets` | Name of a secret deployed into the cluster. 
Must follow example_secrets.yaml | - | -| `logging.logstash.enabled` | Enable remote logging | true | -| `logging.logstash.host` | Host running logstash listening for log data | `servicex.atlas-ml.org` | -| `logging.logstash.port` | Port to send logging to | 5959 | -| `logging.logstash.protocol` | Protocol to be used (options are TCP and UDP) | TCP | -| `logging.logstash.monitor` | Link to be shown inside Monitor web page iframe | UC Kibana dashboard | -| `logging.logstash.logs` | Link to be shown inside Logs web page iframe | UC Kibana dashboard | -| `app.image` | ServiceX_App image name | `sslhep/servicex_app` | -| `app.tag` | ServiceX image tag | `latest` | -| `app.logLevel` | Logging level for ServiceX web app (uses standard unix levels) | `WARNING` | -| `app.pullPolicy` | ServiceX image pull policy | `Always` | -| `app.checksImage` | ServiceX init container image for checks | `ncsa/checks:latest` | -| `app.rabbitmq.retries` | Number of times to retry connecting to RabbitMQ on startup | 12 | -| `app.rabbitmq.retry_interval` | Number of seconds to wait between RabbitMQ retries on startup | 10 | -| `app.replicas` | Number of App pods to start. Experimental! 
| 1 | -| `app.auth` | Enable authentication or allow unfettered access (Python boolean string) | `false` | -| `app.globusClientID` | Globus application Client ID | - | -| `app.globusClientSecret` | Globus application Client Secret | - | -| `app.adminEmail` | Email address for initial admin user | | -| `app.tokenExpires` | Seconds until the ServiceX API tokens (JWT refresh tokens) expire | False (never) | -| `app.authExpires` | Seconds until the JWT access tokens expire | 21600 (six hours) | -| `app.ingress.enabled` | Enable install of ingress | false | -| `app.ingress.class` | Class to be set in `kubernetes.io/ingress.class` annotation | nginx | -| `app.ingress.host` | Hostname to associate ingress with | servicex.ssl-hep.org | -| `app.ingress.defaultBackend` | Name of a service to send requests to internal endpoints to | default-http-backend | -| `app.ingress.tls.enabled` | Enable TLS for ServiceX API Ingress resource | false | -| `app.ingress.tls.secretName` | Name of TLS Secret used for ServiceX API server | `{{.Release.Name}}-app-tls` | -| `app.ingress.tls.clusterIssuer` | Specify a ClusterIssuer if using cert-manager | - | -| `app.resources` | Pass in Kubernetes pod resource spec to deployment to change CPU and memory | { } | -| `app.slackSigningSecret` | Signing secret for Slack application | - | -| `app.newSignupWebhook` | Slack webhook URL for new signups | - | -| `app.mailgunApiKey` | API key to send Mailgun emails to newly approved users | - | -| `app.mailgunDomain` | Sender domain for emails (should be verified through Mailgun) | - | -| `app.defaultDIDFinderScheme` | DID Finder scheme if none provided in request. If left blank, template will attempt to guess. | - | -| `app.validateTransformerImage` | Should docker image name be validated at DockerHub? | `true` | - | `app.defaultUsers` | Name of secret holding json file with default users to create on deployment | - | -| `didFinder.rucio.enabled` | Should we deploy the Rucio DID Finder? 
| `true` | -| `didFinder.rucio.image` | Rucio DID Finder image name | `sslhep/servicex-did-finder` | -| `didFinder.rucio.tag` | Rucio DID Finder image tag | `latest` | -| `didFinder.rucio.pullPolicy` | Rucio DID Finder image pull policy | `Always` | -| `didFinder.rucio.servicex_latitude` | Latitude of the computing center where ServiceX runs. Will be used by Rucio to return the closest input data replica. | 41.78 | -| `didFinder.rucio.servicex_longitude` | Longitude of the computing center where ServiceX runs. Will be used by Rucio to return the closest input data replica. | -87.7 | -| `didFinder.rucio.reportLogicalFiles` | For CMS xCache sites, we don't want the replicas, only logical names. Set to true to get this behavior | false | -| `didFinder.rucio.rucio_host` | URL for Rucio service to use | `https://voatlasrucio-server-prod.cern.ch:443` | -| `didFinder.rucio.auth _host` | URL to obtain Rucio authentication | `https://voatlasrucio-auth-prod.cern.ch:443` | - -| `didFinder.CERNOpenData.enabled` | Should we deploy the CERN OpenData DID Finder? `true` | -| `didFinder.CERNOpenData.image` | CERN OpenData DID Finder image name | `sslhep/servicex-did-finder` | -| `didFinder.CERNOpenData.tag` | CERN OpenData DID Finder image tag | `latest` | -| `didFinder.CERNOpenData.pullPolicy` | CERN OpenData DID Finder image pull policy | `Always` | -| `codegen.atlasxaod.enabled` | Deploy the ATLAS xAOD Code generator? 
| true | -| `codegen.atlasxaod.image` | Code generator image | `sslhep/servicex_code_gen_func_adl_xaod` | -| `codegen.atlasxaod.pullPolicy` | | true | -| `codegen.atlasxaod.tag` | Code generator image tag | develop | -| `codegen.atlasxaod.defaultScienceContainerImage` | The transformer image that should be run against this generated code | `sslhep/servicex_func_adl_xaod_transformer` | -| `codegen.atlasxaod.defaultScienceContainerTag` | Tag for the transformer image that should be run against this generated code | develop | -|`codegen.uproot.enabled` | Deploy the uproot code generator? - also all of the code gen settings, above are available | true | -|`codegen.cms.enabled` | Deploy the CMS AOD code generator? - also all of the code gen settings, above are available | true | -|`codegen.python.enabled` | Deploy the python uproot code generator? - also all of the code gen settings, above are available | true | -| `x509Secrets.image` | X509 Secret Service image name | `sslhep/x509-secrets` | -| `x509Secrets.tag` | X509 Secret Service image tag | `latest` | -| `x509Secrets.pullPolicy` | X509 Secret Service image pull policy | `Always` | -| `x509Secrets.vomsOrg` | Which VOMS org to contact for proxy? | `atlas` | -| `x509Secrets.initImage` | X509 Secret Service init container image | `alpine:3.6` | -| `rbacEnabled` | Specify if rbac is enabled in your cluster | `true` | -| `hostMount` | Optional path to mount in transformers as /data | - | -| `gridAccount` | CERN User account name to access Rucio | - | -| `noCerts` | Set to true to disable x509 certs and only use open data | false | -| `rabbitmq.password` | Override the generated RabbitMQ password | leftfoot1 | -| `objectstore.enabled` | Deploy a minio object store with Servicex? | true | -| `objectstore.internal` | Deploy a captive minio instance with this chart? | true | -| `objectstore.publicURL` | What URL should the client use to download files? 
If set, this is given whether ingress is enabled or not | nil | -| `postgres.enabled` | Deploy a postgres database into cluster? If not, we use a sqllite db | false | -| `minio.auth.rootUser` | Username to log into minio | miniouser | -| `minio.auth.rootPassword` | Password key to log into minio | leftfoot1 | -| `minio.apiIngress.enabled` | Should minio chart deploy an ingress to the service? | false | -| `minio.apiIngress.hostname` | Hostname associate with ingress controller | nil | -| `transformer.cachePrefix` | Prefix string to stick in front of file paths. Useful for XCache | | -| `transformer.autoscaler.enabled` | Enable/disable horizontal pod autoscaler for transformers | True | -| `transformer.autoscaler.cpuScaleThreshold` | CPU percentage threshold for pod scaling | 30 | -| `transformer.autoscaler.minReplicas` | Minimum number of transformer pods per request | 1 | -| `transformer.autoscaler.maxReplicas` | Maximum number of transformer pods per request | 20 | -| `transformer.pullPolicy` | Pull policy for transformer pods (Image name specified in REST Request) | Always | -| `transformer.priorityClassName` | priorityClassName for transformer pods (Not setting it means getting global default) | Not Set | -| `transformer.cpuLimit` | Set CPU resource limit for pod in number of cores | 1 | -| `transformer.sidecarImage` | Image name for the transformer sidecar container that hold the serviceX code | 'sslhep/servicex_sidecar_transformer' | -| `transformer.sidecarTag` | Tag for the sidecar container | 'develop' | -| `transformer.sidecarPullPolicy` | Pull Policy for the sidecar container | 'Always' | -| `transformer.persistence.existingClaim` | Existing persistent volume claim | nil | -| `transformer.subdir` | Subdirectory of the mount to write transformer results to (should end with trailing /) | nil | -| `minioCleanup.enabled` | Enable deployment of minio cleanup service | false | -| `minioCleanup.image` | Default image for minioCleanup cronjob | 
`sslhep/servicex_minio_cleanup` | -| `minioCleanup.tag` | minioCleanup image tag | | -| `minioCleanup.pullPolicy` | minioCleanup image pull policy | `Always` | -| `minioCleanup.threads` | Number of threads to use when processing S3 Storage | 6 | -| `minioCleanup.logLevel` | Log level to use for logging (e.g. DEBUG, INFO, WARN, ERROR, FATAL) | INFO | +| `secrets` | Name of a secret deployed into the cluster. Must follow example_secrets.yaml | - | +| `logging.logstash.enabled` | Enable remote logging | true | +| `logging.logstash.host` | Host running logstash listening for log data| `servicex.atlas-ml.org` | +| `logging.logstash.port` | Port to send logging to| 5959 | +| `logging.logstash.protocol` | Protocol to be used (options are TCP and UDP) | TCP | +| `logging.logstash.monitor` | Link to be shown inside Monitor web page iframe | UC Kibana dashboard | +| `logging.logstash.logs` | Link to be shown inside Logs web page iframe| UC Kibana dashboard | +| `app.image` | ServiceX_App image name| `sslhep/servicex_app` | +| `app.tag` | ServiceX image tag | `latest` | +| `app.logLevel` | Logging level for ServiceX web app (uses standard unix levels) | `WARNING` | +| `app.pullPolicy` | ServiceX image pull policy | `Always` | +| `app.checksImage` | ServiceX init container image for checks | `ncsa/checks:latest` | +| `app.rabbitmq.retries`| Number of times to retry connecting to RabbitMQ on startup | 12 | +| `app.rabbitmq.retry_interval` | Number of seconds to wait between RabbitMQ retries on startup | 10 | +| `app.replicas` | Number of App pods to start. Experimental! 
| 1 | +| `app.auth` | Enable authentication or allow unfettered access (Python boolean string) | `false` | +| `app.globusClientID` | Globus application Client ID | - | +| `app.globusClientSecret` | Globus application Client Secret | - | +| `app.adminEmail` | Email address for initial admin user | | +| `app.tokenExpires` | Seconds until the ServiceX API tokens (JWT refresh tokens) expire| False (never) | +| `app.authExpires` | Seconds until the JWT access tokens expire | 21600 (six hours) | +| `app.ingress.enabled` | Enable install of ingress | false| +| `app.ingress.class` | Class to be set in `kubernetes.io/ingress.class` annotation | nginx| +| `app.ingress.host` | Hostname to associate ingress with | servicex.ssl-hep.org | +| `app.ingress.defaultBackend` | Name of a service to send requests to internal endpoints to | default-http-backend | +| `app.ingress.tls.enabled` | Enable TLS for ServiceX API Ingress resource| false| +| `app.ingress.tls.secretName` | Name of TLS Secret used for ServiceX API server | `{{.Release.Name}}-app-tls` | +| `app.ingress.tls.clusterIssuer` | Specify a ClusterIssuer if using cert-manager | - | +| `app.resources` | Pass in Kubernetes pod resource spec to deployment to change CPU and memory | { } | +| `app.slackSigningSecret` | Signing secret for Slack application | - | +| `app.newSignupWebhook`| Slack webhook URL for new signups | - | +| `app.mailgunApiKey` | API key to send Mailgun emails to newly approved users | - | +| `app.mailgunDomain` | Sender domain for emails (should be verified through Mailgun) | - | +| `app.defaultDIDFinderScheme` | DID Finder scheme if none provided in request. If left blank, template will attempt to guess. | - | +| `app.validateTransformerImage` | Should docker image name be validated at DockerHub? | `true` | + | `app.defaultUsers` | Name of secret holding json file with default users to create on deployment | - | +| `didFinder.rucio.enabled` | Should we deploy the Rucio DID Finder? 
| `true` | +| `didFinder.rucio.image` | Rucio DID Finder image name | `sslhep/servicex-did-finder` | +| `didFinder.rucio.tag` | Rucio DID Finder image tag | `latest` | +| `didFinder.rucio.pullPolicy` | Rucio DID Finder image pull policy | `Always` | +| `didFinder.rucio.servicex_latitude` | Latitude of the computing center where ServiceX runs. Will be used by Rucio to return the closest input data replica. | 41.78| +| `didFinder.rucio.servicex_longitude` | Longitude of the computing center where ServiceX runs. Will be used by Rucio to return the closest input data replica. | -87.7| +| `didFinder.rucio.reportLogicalFiles` | For CMS xCache sites, we don't want the replicas, only logical names. Set to true to get this behavior | false| +| `didFinder.rucio.rucio_host` | URL for Rucio service to use | `https://voatlasrucio-server-prod.cern.ch:443` | +| `didFinder.rucio.auth _host` | URL to obtain Rucio authentication | `https://voatlasrucio-auth-prod.cern.ch:443` | +| `didFinder.rucio.memcache.enabled` | Should use memcache to store results returned by the DID lookup? | true | +| `didFinder.rucio.memcache.image` | Docker image for memcache | memcached | +| `didFinder.rucio.memcache.tag` | Tag of the memcache image | alpine | +| `didFinder.rucio.memcache.ttl` | How long should memcache results be considered valid (in seconds)| 86400| +| `didFinder.CERNOpenData.enabled` | Should we deploy the CERN OpenData DID Finder? | `true` | +| `didFinder.CERNOpenData.image` | CERN OpenData DID Finder image name | `sslhep/servicex-did-finder` | +| `didFinder.CERNOpenData.tag` | CERN OpenData DID Finder image tag | `latest` | +| `didFinder.CERNOpenData.pullPolicy` | CERN OpenData DID Finder image pull policy | `Always` | +| `codegen.atlasxaod.enabled` | Deploy the ATLAS xAOD Code generator? 
| true | +| `codegen.atlasxaod.image` | Code generator image | `sslhep/servicex_code_gen_func_adl_xaod` | +| `codegen.atlasxaod.pullPolicy` | Code generator image pull policy | true | +| `codegen.atlasxaod.tag` | Code generator image tag | develop | +| `codegen.atlasxaod.defaultScienceContainerImage` | The transformer image that should be run against this generated code | `sslhep/servicex_func_adl_xaod_transformer` | +| `codegen.atlasxaod.defaultScienceContainerTag` | Tag for the transformer image that should be run against this generated code | develop | +|`codegen.uproot.enabled` | Deploy the uproot code generator? - also all of the code gen settings, above are available | true | +|`codegen.cms.enabled` | Deploy the CMS AOD code generator? - also all of the code gen settings, above are available | true | +|`codegen.python.enabled` | Deploy the python uproot code generator? - also all of the code gen settings, above are available | true | +| `x509Secrets.image` | X509 Secret Service image name | `sslhep/x509-secrets` | +| `x509Secrets.tag` | X509 Secret Service image tag | `latest` | +| `x509Secrets.pullPolicy` | X509 Secret Service image pull policy | `Always` | +| `x509Secrets.vomsOrg` | Which VOMS org to contact for proxy? | `atlas` | +| `x509Secrets.initImage` | X509 Secret Service init container image | `alpine:3.6` | +| `rbacEnabled` | Specify if rbac is enabled in your cluster | `true` | +| `hostMount` | Optional path to mount in transformers as /data | - | +| `gridAccount` | CERN User account name to access Rucio | - | +| `noCerts` | Set to true to disable x509 certs and only use open data | false| +| `rabbitmq.auth.password` | Override the generated RabbitMQ password | leftfoot1 | +| `rabbitmq.apiIngress.enabled` | Should RMQ chart deploy an ingress to the service? | false | +| `rabbitmq.apiIngress.host` | RMQ ingress hostname | servicex-rmq.ssl-hep.org | +| `rabbitmq.apiIngress.class` | RMQ ingress class | nginx | +| `objectstore.enabled` | Deploy a minio object store with Servicex? 
| true | +| `objectstore.internal`| Deploy a captive minio instance with this chart? | true | +| `objectstore.publicURL` | What URL should the client use to download files? If set, this is given whether ingress is enabled or not | nil | +| `postgres.enabled` | Deploy a postgres database into cluster? If not, we use a SQLite db | false| +| `minio.auth.rootUser` | Username to log into minio | miniouser | +| `minio.auth.rootPassword` | Password key to log into minio | leftfoot1 | +| `minio.apiIngress.enabled` | Should minio chart deploy an ingress to the service? | false| +| `minio.apiIngress.hostname` | Hostname associated with ingress controller | nil | +| `transformer.cachePrefix` | Prefix string to stick in front of file paths. Useful for XCache | | +| `transformer.autoscaler.enabled` | Enable/disable horizontal pod autoscaler for transformers | True | +| `transformer.autoscaler.cpuScaleThreshold` | CPU percentage threshold for pod scaling | 30 | +| `transformer.autoscaler.minReplicas` | Minimum number of transformer pods per request | 1 | +| `transformer.autoscaler.maxReplicas` | Maximum number of transformer pods per request | 20 | +| `transformer.pullPolicy` | Pull policy for transformer pods (Image name specified in REST Request) | Always | +| `transformer.priorityClassName` | priorityClassName for transformer pods (Not setting it means getting global default) | Not Set | +| `transformer.cpuLimit`| Set CPU resource limit for pod in number of cores | 1 | +| `transformer.sidecarImage` | Image name for the transformer sidecar container that holds the serviceX code | 'sslhep/servicex_sidecar_transformer' | +| `transformer.sidecarTag` | Tag for the sidecar container | 'develop' | +| `transformer.sidecarPullPolicy` | Pull Policy for the sidecar container | 'Always' | +| `transformer.persistence.existingClaim` | Existing persistent volume claim | nil | +| `transformer.subdir` | Subdirectory of the mount to write transformer results to (should end with trailing /) | 
nil | +| `minioCleanup.enabled`| Enable deployment of minio cleanup service | false| +| `minioCleanup.image` | Default image for minioCleanup cronjob | `sslhep/servicex_minio_cleanup` | +| `minioCleanup.tag` | minioCleanup image tag | | +| `minioCleanup.pullPolicy` | minioCleanup image pull policy | `Always` | +| `minioCleanup.threads`| Number of threads to use when processing S3 Storage | 6 | +| `minioCleanup.logLevel` | Log level to use for logging (e.g. DEBUG, INFO, WARN, ERROR, FATAL) | INFO | | `minioCleanup.schedule` | Schedule for minioCleanup cronjob. See [reference](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax) for details on fields | `* */8 * * *` (every 8 hours) | -| `minioCleanup.maxAge` | Max age in days before removing results | 30 | -| `minioCleanup.maxSize` | Start removing buckets when total space used reaches this number (can use G,M, T suffixes) | '1G' | -| `minioCleanup.normSize` | Size at which to stop removing buckets | '700M' | | ->>>>>>> origin +| `minioCleanup.maxAge` | Max age in days before removing results | 30 | +| `minioCleanup.maxSize`| Start removing buckets when total space used reaches this number (can use G,M, T suffixes) | '1G' | +| `minioCleanup.normSize` | Size at which to stop removing buckets | '700M' | | diff --git a/docs/development/contributing.md b/docs/development/contributing.md index e96ad2e8c..ec3e0370e 100644 --- a/docs/development/contributing.md +++ b/docs/development/contributing.md @@ -4,10 +4,10 @@ Welcome to the ServiceX contributor guide, and thank you for your interest in co ## Overview -ServiceX uses a microservice architecture, -and is designed to be hosted on a Kubernetes cluster. -The ServiceX project uses a polyrepo strategy for source code management: -the source code for each microservice is located in a dedicated repo. +ServiceX uses a microservice architecture, +and is designed to be hosted on a Kubernetes cluster. 
+The ServiceX project uses a polyrepo strategy for source code management: +the source code for each microservice is located in a dedicated repo. Below is a partial list of these repositories: @@ -21,65 +21,81 @@ Please read our [architecture document](https://servicex.readthedocs.io/en/lates ## Branching Strategy -ServiceX uses a slightly modified GitLab flow. Each repository has a main branch, usually named `develop` (or `master` for the Python frontend). All changes should be made on feature branches and submitted as PRs to the main branch. Releases are frozen on dedicated release branches, e.g. `v1.0.0-RC.2`. +ServiceX uses a slightly modified GitLab flow. Each repository has a main branch, usually named `develop` (or `master` for the Python frontend). All changes should be made on feature branches and submitted as PRs to the main branch. Releases are frozen on dedicated release branches, e.g. `v1.0.0-RC.2`. ## Development Workflow 1. Set up a local development environment: - - Decide which microservice (or Helm chart) you'd like to change, - and locate the corresponding repository. - - If you are a not a member of the `ssl-hep` GitHub organization, + - Decide which microservice (or Helm chart) you'd like to change, + and locate the corresponding repository. + - If you are a not a member of the `ssl-hep` GitHub organization, fork the repository. - Clone the (forked) repository to your local machine: + ``` git clone git@github.com:/ServiceX_App.git ``` + - If you created a fork, add the upstream repository as remote: + ``` git remote add upstream git@github.com:ssl-hep/ServiceX_App.git ``` + - Set up a new environment via ``conda`` or ``virtualenv``. - Install dependencies, including test dependencies: + ``` python3 -m pip install -e .[test] ``` + - If the root directory contains a file named `.pre-commit-config.yaml`, you can install the [pre-commit](https://pre-commit.com/) hooks with: + ``` pip install pre-commit pre-commit install ``` + 1. 
Develop your contribution: - Pull latest changes from upstream: + ``` git checkout develop git pull upstream develop ``` + - Create a branch for the feature you want to work on: + ``` git checkout -b fix-issue-99 ``` + - Commit locally as you progress with `git add` and `git commit`. 1. Test your changes: - Run the full test suite with `python -m pytest`, or target specific test files with `python -m pytest tests/path/to/file.py`. - Please write new unit tests to cover any changes you make. - You can also manually test microservice changes against a full ServiceX deployment by building the Docker image, pushing it to DockerHub, and setting the `image` and `tag` values as follows: + ```yaml app: image: / tag: my-feature-branch - ``` - - For more details, please read our full - [deployment guide](https://servicex.readthedocs.io/en/latest/deployment/basic). + ``` + + - For more details, please read our full + [deployment guide](https://servicex.readthedocs.io/en/latest/deployment/basic). 1. Submit a pull request to the upstream repository. ## Issues + Please submit issues for bugs and feature requests to the [main ServiceX repository](https://github.com/ssl-hep/ServiceX), unless the issue is specific to a single microservice. We manage project priorities with a [ZenHub board](https://app.zenhub.com/workspaces/servicex-5caba4288d0ceb76ea94ae1f/board?repos=180217333,180236972,185614791,182823774,202592339). ## Join us on Slack + We coordinate our efforts on the [IRIS-HEP Slack](http://iris-hep.slack.com). Come join this intellectual hub! @@ -131,17 +147,18 @@ The best way to work on ServiceX is using the unit tests. That isn't always poss 1. Finally restart the pod, which should cause it to pick up the new build. This might kill a port-forward you have in place, so don't forget to restart that! ## Debugging Tips -Microservice architectures can be difficult to test and debug. Here are some + +Microservice architectures can be difficult to test and debug. 
Here are some helpful hints to make this easier. 1. Instead of relying on the DID Finder to locate some particular datafile, you -can mount one of your local directories into the transformer pod and then +can mount one of your local directories into the transformer pod and then instruct the DID Finder to always offer up the path to that file regardless of the submitted DID. You can use the `hostMount` value to have a local directory -mounted into each transformer pod under `/data`. You can use the +mounted into each transformer pod under `/data`. You can use the `didFinder.staticFile` value to instruct DID Finder to offer up a file from that directory. -2. You can use port-forwarding to expose port 15672 from the RabbitMQ pod to +2. You can use port-forwarding to expose port 15672 from the RabbitMQ pod to your laptop and log into the Rabbit admin console using the username: `user` and password `leftfoot1`. From here you can monitor the queues, purge old messages and inject your own messages @@ -150,4 +167,4 @@ and inject your own messages ### Hotfixes -If a critical bugfix or hotfix must be applied to a previous release, it should be merged to the main branch and then applied to each affected release branch using `git cherry-pick -m 1`. Merge commits have 2 parents, so the `-m 1` flag is used to specify that the first parent (i.e. previous commit on the main branch) should be used +If a critical bugfix or hotfix must be applied to a previous release, it should be merged to the main branch and then applied to each affected release branch using `git cherry-pick -m 1`. Merge commits have 2 parents, so the `-m 1` flag is used to specify that the first parent (i.e. 
previous commit on the main branch) should be used diff --git a/docs/example_secrets.yaml b/docs/example_secrets.yaml index fade39e8f..d37855d3e 100644 --- a/docs/example_secrets.yaml +++ b/docs/example_secrets.yaml @@ -1,43 +1,46 @@ { "kind": "SealedSecret", "apiVersion": "bitnami.com/v1alpha1", - "metadata": { - "name": "servicex-secrets", - "namespace": "servicex-namespace", - "creationTimestamp": null, - "annotations": { - "sealedsecrets.bitnami.com/namespace-wide": "true" - } - }, - "spec": { - "template": { - "metadata": { - "name": "servicex-secrets", - "namespace": "servicex-namespace", - "creationTimestamp": null, - "annotations": { - "sealedsecrets.bitnami.com/managed": "true", - "sealedsecrets.bitnami.com/namespace-wide": "true" - } - }, - "type": "Opaque", - "data": null + "metadata": + { + "name": "servicex-secrets", + "namespace": "servicex-namespace", + "creationTimestamp": null, + "annotations": { "sealedsecrets.bitnami.com/namespace-wide": "true" }, + }, + "spec": + { + "template": + { + "metadata": + { + "name": "servicex-secrets", + "namespace": "servicex-namespace", + "creationTimestamp": null, + "annotations": + { + "sealedsecrets.bitnami.com/managed": "true", + "sealedsecrets.bitnami.com/namespace-wide": "true", + }, + }, + "type": "Opaque", + "data": null, + }, + "encryptedData": + { + "accesskey": "aaa", + "flaskSecretKey": "aaa", + "globusClientID": "aaa", + "globusClientSecret": "aaa", + "jwtSecretKey": "aaa", + "mailgunAPIKey": "aaa", + "postgresql-password": "aaa", + "rabbitmq-password": "aaa", + "root-password": "aaa", + "root-user": "aaa", + "secretkey": "aaa", + "slackSigningSecret": "aaa", + "slackSignupWebhook": "aaa", + }, }, - "encryptedData": { - "accesskey": "aaa", - "flaskSecretKey": "aaa", - "globusClientID": "aaa", - "globusClientSecret": "aaa", - "jwtSecretKey": "aaa", - "mailgunAPIKey": "aaa", - "postgresql-password": "aaa", - "rabbitmq-erlang-cookie": "aaa", - "rabbitmq-password": "aaa", - "root-password": "aaa", - 
"root-user": "aaa", - "secretkey": "aaa", - "slackSigningSecret": "aaa", - "slackSignupWebhook": "aaa" - } - } } diff --git a/helm/example_secrets.yaml b/helm/example_secrets.yaml index 7d98b463e..047b68f9c 100644 --- a/helm/example_secrets.yaml +++ b/helm/example_secrets.yaml @@ -16,5 +16,4 @@ data: accesskey: <> secretkey: <> rabbitmq-password: << rabbitMQ password >> - rabbitmq-erlang-cookie: << rabbitMQ erlang cookie >> postgresql-password: << postgresql password for postgres user >> diff --git a/helm/servicex/templates/app/rmq_ingress.yaml b/helm/servicex/templates/app/rmq_ingress.yaml new file mode 100644 index 000000000..a98b5077a --- /dev/null +++ b/helm/servicex/templates/app/rmq_ingress.yaml @@ -0,0 +1,32 @@ +{{- if .Values.rabbitmq.apiIngress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: {{ .Values.rabbitmq.apiIngress.class }} + {{- if .Values.rabbitmq.apiIngress.tls.clusterIssuer }} + cert-manager.io/cluster-issuer: {{ .Values.rabbitmq.apiIngress.tls.clusterIssuer }} + acme.cert-manager.io/http01-edit-in-place: "true" + {{- end }} + labels: + app: {{ .Release.Name }}-rmq-servicex + name: {{ .Release.Name }}-rmq-servicex +spec: + {{- if .Values.rabbitmq.apiIngress.tls.enabled }} + tls: + - hosts: + - {{ .Release.Name }}.{{ .Values.rabbitmq.apiIngress.host }} + secretName: {{ tpl .Values.rabbitmq.apiIngress.tls.secretName . 
}} + {{- end }} + rules: + - host: {{ .Release.Name }}.{{ .Values.rabbitmq.apiIngress.host }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ .Release.Name }}-rabbitmq + port: + number: 5672 +{{- end }} diff --git a/helm/servicex/templates/app/rmq_ingress_inst.yaml b/helm/servicex/templates/app/rmq_ingress_inst.yaml new file mode 100644 index 000000000..fe21dc804 --- /dev/null +++ b/helm/servicex/templates/app/rmq_ingress_inst.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: nginx + labels: + app: servicex-rmq-servicex + name: servicex-rmq-servicex +spec: + rules: + # - host: servicex.rmq.ssl-hep.org + - host: servicex.rmq.af.uchicago.edu + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: servicex-rabbitmq + port: + number: 5672 diff --git a/helm/servicex/values.yaml b/helm/servicex/values.yaml index 310b32f60..87539272d 100644 --- a/helm/servicex/values.yaml +++ b/helm/servicex/values.yaml @@ -115,6 +115,14 @@ postgresql: persistence: enabled: false rabbitmq: + apiIngress: + enabled: false + class: nginx + host: rmq.ssl-hep.org + tls: + enabled: false + clusterIssuer: null + secretName: "{{.Release.Name}}-rmq-tls" auth: password: leftfoot1 persistence: @@ -139,7 +147,7 @@ transformer: scienceContainerPullPolicy: Always language: python - exec: # replace me + exec: # replace me outputDir: /servicex/output persistence: diff --git a/lite/.dockerignore b/lite/.dockerignore new file mode 100644 index 000000000..911db4120 --- /dev/null +++ b/lite/.dockerignore @@ -0,0 +1,2 @@ +kube +.github diff --git a/lite/.github/workflows/ci.yaml b/lite/.github/workflows/ci.yaml new file mode 100644 index 000000000..2b5a2c403 --- /dev/null +++ b/lite/.github/workflows/ci.yaml @@ -0,0 +1,37 @@ +name: CI/CD + +on: + push: + branches: + - "*" + tags: + - "*" + pull_request: + +jobs: + test: + strategy: + matrix: + python-version: ["3.10"] + runs-on: 
ubuntu-latest + + steps: + - uses: actions/checkout@v4.1.1 + publish: + runs-on: ubuntu-latest + needs: test + steps: + - uses: actions/checkout@v4.1.1 + + - name: Extract tag name + shell: bash + run: echo "##[set-output name=imagetag;]$(echo ${GITHUB_REF##*/})" + id: extract_tag_name + + - name: Build Docker Image + uses: elgohr/Publish-Docker-Github-Action@v5 + with: + name: sslhep/servicex-lite:${{ steps.extract_tag_name.outputs.imagetag }} + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + # tag: "${GITHUB_REF##*/}" diff --git a/lite/.gitignore b/lite/.gitignore new file mode 100644 index 000000000..265cff59c --- /dev/null +++ b/lite/.gitignore @@ -0,0 +1,3 @@ +.venv +kubeconfig-secret* +test* \ No newline at end of file diff --git a/lite/Dockerfile b/lite/Dockerfile new file mode 100644 index 000000000..46fa0f365 --- /dev/null +++ b/lite/Dockerfile @@ -0,0 +1,27 @@ +FROM python:3.11 AS builder + +RUN useradd -ms /bin/bash sxlite + +COPY pyproject.toml poetry.lock /home/sxlite/ +WORKDIR /home/sxlite + +FROM builder as poetry +ENV POETRY_HOME=/home/sxlite +ENV POETRY_VIRTUALENVS_IN_PROJECT=true +ENV PATH="$POETRY_HOME/bin:$PATH" +RUN python -c 'from urllib.request import urlopen; print(urlopen("https://install.python-poetry.org").read().decode())' | python - +COPY resources ./ +RUN poetry install --no-interaction --no-ansi -vvv + +FROM builder AS runtime + +COPY --from=poetry /home/sxlite /home/sxlite +WORKDIR /home/sxlite +RUN mkdir ./sxlite +COPY scripts/*.py resources/start.sh ./ + +RUN chmod +x start.sh + +USER sxlite + +ENTRYPOINT ["/home/sxlite/start.sh"] diff --git a/lite/README.md b/lite/README.md new file mode 100644 index 000000000..9aadfc4a5 --- /dev/null +++ b/lite/README.md @@ -0,0 +1,16 @@ +# ServiceX Lite + +This service runs a simple code that listens for the transformation request created in the "master" servicex and deploys transforms that will do the same job as the ones in the master. 
+ +Prerequisits: + +* Kube configs of both slave and master k8s clusters. +* Deployment of a loadbalancer in front of the master servicex RMQ +* Master servicex configmap value ADVERTISED_HOSTNAME is set to the externally accessible servicex URL. +* Master servicex ingress routes internal paths the same way as external. + +## TODO + +* add options to configure xcache, resource request, etc. +* add creation of a HPA +* if needed - an automatic patching of master's configmap and ingress routes. diff --git a/lite/kube/deployment.yaml b/lite/kube/deployment.yaml new file mode 100644 index 000000000..0c8287306 --- /dev/null +++ b/lite/kube/deployment.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: servicex-lite + namespace: servicex-lite + labels: + k8s-app: servicex-lite +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: servicex-lite + template: + metadata: + labels: + k8s-app: servicex-lite + spec: + containers: + - name: servicex-lite + image: sslhep/servicex_lite:latest + imagePullPolicy: Always + env: + - name: ORIGIN_CONTEXT + value: af-admin@af + - name: ORIGIN_NAMESPACE + value: servicex + - name: SXLITE_CONTEXT + value: kubernetes-admin@cluster.local + - name: SXLITE_NAMESPACE + value: servicex-lite + - name: RMQ_USER + value: "user" + - name: RMQ_HOST + value: "192.170.241.253" + - name: INITIAL_PODS + value: "15" + - name: MAX_PODS + value: "1000" + - name: PYTHONUNBUFFERED + value: "true" + # - name: XCACHE only if you want to overwrite xcache + # value: "" + - name: EXTRA_ANNOTATIONS + value: "" + - name: HPA_VERSION + value: "v2" + - name: HPA_CPU_UTILIZATION + value: "10" + volumeMounts: + - name: kubeconfig + mountPath: /home/sxlite/.kube/ + resources: + requests: + memory: "256Mi" + cpu: "250m" + limits: + memory: "512Mi" + cpu: "500m" + volumes: + - name: kubeconfig + secret: + secretName: kubeconfig-secret diff --git a/lite/kube/lb.yaml b/lite/kube/lb.yaml new file mode 100644 index 000000000..b4683393c --- 
/dev/null +++ b/lite/kube/lb.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: rmq-lb-service +spec: + selector: + app.kubernetes.io/name: rabbitmq + ports: + - port: 5672 + targetPort: 5672 + type: LoadBalancer diff --git a/lite/poetry.lock b/lite/poetry.lock new file mode 100644 index 000000000..7cfdacf51 --- /dev/null +++ b/lite/poetry.lock @@ -0,0 +1,407 @@ +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. + +[[package]] +name = "cachetools" +version = "5.3.2" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.3.2-py3-none-any.whl", hash = "sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1"}, + {file = "cachetools-5.3.2.tar.gz", hash = "sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2"}, +] + +[[package]] +name = "certifi" +version = "2023.7.22" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, + {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = 
"charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = 
"charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "google-auth" +version = "2.23.4" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-auth-2.23.4.tar.gz", hash = "sha256:79905d6b1652187def79d491d6e23d0cbb3a21d3c7ba0dbaa9c8a01906b13ff3"}, + {file = "google_auth-2.23.4-py2.py3-none-any.whl", hash = "sha256:d4bbc92fe4b8bfd2f3e8d88e5ba7085935da208ee38a134fc280e7ce682a05f2"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "idna" 
+version = "3.4" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "kubernetes" +version = "28.1.0" +description = "Kubernetes python client" +optional = false +python-versions = ">=3.6" +files = [ + {file = "kubernetes-28.1.0-py2.py3-none-any.whl", hash = "sha256:10f56f8160dcb73647f15fafda268e7f60cf7dbc9f8e46d52fcd46d3beb0c18d"}, + {file = "kubernetes-28.1.0.tar.gz", hash = "sha256:1468069a573430fb1cb5ad22876868f57977930f80a6749405da31cd6086a7e9"}, +] + +[package.dependencies] +certifi = ">=14.05.14" +google-auth = ">=1.0.1" +oauthlib = ">=3.2.2" +python-dateutil = ">=2.5.3" +pyyaml = ">=5.4.1" +requests = "*" +requests-oauthlib = "*" +six = ">=1.9.0" +urllib3 = ">=1.24.2,<2.0" +websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0" + +[package.extras] +adal = ["adal (>=1.0.2)"] + +[[package]] +name = "oauthlib" +version = "3.2.2" +description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +optional = false +python-versions = ">=3.6" +files = [ + {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, + {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, +] + +[package.extras] +rsa = ["cryptography (>=3.0.0)"] +signals = ["blinker (>=1.4.0)"] +signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] + +[[package]] +name = "pyasn1" +version = "0.5.0" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" 
+files = [ + {file = "pyasn1-0.5.0-py2.py3-none-any.whl", hash = "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57"}, + {file = "pyasn1-0.5.0.tar.gz", hash = "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.3.0" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"}, + {file = "pyasn1_modules-0.3.0.tar.gz", hash = "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.6.0" + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-logstash" +version = "0.4.8" +description = "Python logging handler for Logstash." 
+optional = false +python-versions = "*" +files = [ + {file = "python-logstash-0.4.8.tar.gz", hash = "sha256:d04e1ce11ecc107e4a4f3b807fc57d96811e964a554081b3bbb44732f74ef5f9"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, 
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = 
"PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = 
"PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-oauthlib" +version = "1.3.1" +description = "OAuthlib authentication support for Requests." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"}, + {file = "requests_oauthlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5"}, +] + +[package.dependencies] +oauthlib = ">=3.0.0" +requests = ">=2.0.0" + +[package.extras] +rsa = ["oauthlib[signedtoken] (>=3.0.0)"] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = 
"sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "urllib3" +version = "1.26.18" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, + {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, +] + +[package.extras] +brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "websocket-client" +version = "1.6.4" +description = "WebSocket client for Python with low level API options" +optional = false +python-versions = ">=3.8" +files = [ + {file = "websocket-client-1.6.4.tar.gz", hash = "sha256:b3324019b3c28572086c4a319f91d1dcd44e6e11cd340232978c684a7650d0df"}, + {file = "websocket_client-1.6.4-py3-none-any.whl", hash = "sha256:084072e0a7f5f347ef2ac3d8698a5e0b4ffbfcab607628cadabc650fc9a83a24"}, +] + +[package.extras] +docs = ["Sphinx (>=6.0)", "sphinx-rtd-theme (>=1.1.0)"] +optional = ["python-socks", "wsaccel"] +test = ["websockets"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.11" +content-hash = "7f394cd602279201a29d73c1df9f06122262161bf8d6ef37b1c09589207f290b" diff --git a/lite/pyproject.toml b/lite/pyproject.toml new file mode 100644 index 000000000..72f78aecb --- /dev/null +++ b/lite/pyproject.toml @@ -0,0 +1,16 @@ +[tool.poetry] +name = "serivcex_light" +version = "0.1.0" +description = "" +authors = ["Ilija Vukotic "] + +[tool.poetry.dependencies] +python = "^3.11" +kubernetes = "^28.1.0" +python-logstash = "^0.4.8" + 
+[tool.poetry.dev-dependencies] + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/lite/requirements.txt b/lite/requirements.txt new file mode 100644 index 000000000..606a5c8a6 --- /dev/null +++ b/lite/requirements.txt @@ -0,0 +1,2 @@ +kubernetes +python-logstash \ No newline at end of file diff --git a/lite/resources/start.sh b/lite/resources/start.sh new file mode 100644 index 000000000..1e9d863bb --- /dev/null +++ b/lite/resources/start.sh @@ -0,0 +1,5 @@ +#!/bin/sh +PATH=.venv/bin:$PATH +. .venv/bin/activate +env +python3.11 ./sXlite.py \ No newline at end of file diff --git a/lite/scripts/sXlite.py b/lite/scripts/sXlite.py new file mode 100644 index 000000000..7cc5935f3 --- /dev/null +++ b/lite/scripts/sXlite.py @@ -0,0 +1,379 @@ +import os +import sys +import time +import base64 +from kubernetes import config, dynamic +from kubernetes.client import api_client +from kubernetes.dynamic import exceptions + +requests = {'active': [], 'new': [], 'unknown': []} + +rmq_pass = '' +rmq_user = os.getenv("RMQ_USER", 'user') +rmq_host = os.getenv("RMQ_HOST", '192.170.241.253') +initial_pods = int(os.getenv("INITIAL_PODS", '15')) +max_pods = int(os.getenv("MAX_PODS", '1000')) + +hpa_version = os.getenv("HPA_VERSION", 'v2') +hpa_cpu_utilization = int(os.getenv("HPA_CPU_UTILIZATION", '10')) + + +class cluster: + def __init__(self, context) -> None: + print(f'initializing context:{context}') + self.client = dynamic.DynamicClient( + api_client.ApiClient(configuration=config.load_kube_config(context=context)) + ) + # self.service_api = self.client.resources.get(api_version="v1", kind="Service") + self.node_api = self.client.resources.get(api_version="v1", kind="Node") + self.secret_api = self.client.resources.get(api_version="v1", kind="Secret") + self.deployment_api = self.client.resources.get(api_version="apps/v1", kind="Deployment") + self.cm_api = self.client.resources.get(api_version="v1", kind="ConfigMap") + + def 
getNodes(self): + for item in self.node_api.get().items: + node = self.node_api.get(name=item.metadata.name) + print(f'{node.metadata.name}') + + def clean_metadata(self, obj): + obj['metadata'].pop('ownerReferences', None) + obj['metadata'].pop('managedFields', None) + obj['metadata'].pop('creationTimestamp', None) + obj['metadata'].pop('namespace', None) + obj['metadata'].pop('resourceVersion', None) + obj['metadata'].pop('uid', None) + obj.pop('status', None) + return obj + + +class sXorigin(cluster): + def __init__(self) -> None: + context = os.environ.get('ORIGIN_CONTEXT') + namespace = os.environ.get('ORIGIN_NAMESPACE') + if not context or not namespace: + sys.exit(1) + super().__init__(context) + self.ns = namespace + + def read_secret(self, name): + print(f'reading secret: {name}') + sec = self.secret_api.get(name=name, namespace=self.ns) + self.clean_metadata(sec) + return sec + + def read_configmap(self, name): + print(f'reading configmap: {name}') + cm = self.cm_api.get(name=name, namespace=self.ns) + self.clean_metadata(cm) + return cm + + def update_requests(self): + # reset all known to unknown + requests['unknown'] = requests['active'] + requests['active'] = [] + requests['new'] = [] + + for dep in self.deployment_api.get(namespace=self.ns).items: + if dep.metadata.name.startswith('transformer'): + req_id = dep.metadata.name[12:] + if req_id in requests['unknown']: + requests['active'].append(req_id) + requests['unknown'].remove(req_id) + else: + requests['new'].append(req_id) + + def get_deployment(self, req_id): + o = self.deployment_api.get(namespace=self.ns, name=f'transformer-{req_id}') + req_id = o.metadata.name[12:] + c1 = o.spec.template.spec.containers[0] + c2 = o.spec.template.spec.containers[1] + v = o.spec.template.spec.volumes + dep = { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": {"name": o.metadata.name}, + "spec": { + "replicas": 0, + "selector": {"matchLabels": {'app': o.spec.selector.matchLabels.app}}, + 
"template": { + "metadata": { + "labels": {"app": o.spec.template.metadata.labels.app}, + "annotations": {} + }, + "spec": { + "containers": [ + { + "name": c1.name, + "image": c1.image, + "command": o.spec.template.spec.containers[0].command, + "env": [ + {"name": "HOST_NAME", + "valueFrom": { + "fieldRef": {"fieldPath": "spec.nodeName"} + } + } + ], + "imagePullPolicy": "Always", + "volumeMounts": [], + "resources": { + "requests": { + "memory": "2Gi", + "cpu": "250m" + }, + "limits": { + "memory": "4Gi", + "cpu": "1" + } + }, + }, + { + "name": c2.name, + "image": c2.image, + "command": o.spec.template.spec.containers[1].command, + "env": [ + {"name": "HOST_NAME", + "valueFrom": { + "fieldRef": {"fieldPath": "spec.nodeName"} + } + } + ], + "imagePullPolicy": "Always", + "volumeMounts": [], + "resources": { + "requests": { + "memory": "2Gi", + "cpu": "250m" + }, + "limits": { + "memory": "4Gi", + "cpu": "1" + } + }, + "args": o.spec.template.spec.containers[1].args + } + ], + "volumes": [] + } + } + } + } + for e in c1.env: + if e.name == 'HOST_NAME': + continue + ta = {'name': e.name, 'value': e.value} + if "XCACHE" in os.environ and e.name == 'CACHE_PREFIX': + ta['value'] = os.getenv('XCACHE') + dep['spec']['template']['spec']['containers'][0]['env'].append(ta) + for e in c2.env: + if e.name == 'HOST_NAME': + continue + ta = {'name': e.name, 'value': e.value} + if "XCACHE" in os.environ and e.name == 'CACHE_PREFIX': + ta['value'] = os.getenv('XCACHE') + dep['spec']['template']['spec']['containers'][1]['env'].append(ta) + + for e in c1.volumeMounts: + ta = {'name': e.name, 'mountPath': e.mountPath} + dep['spec']['template']['spec']['containers'][0]['volumeMounts'].append(ta) + for e in c2.volumeMounts: + ta = {'name': e.name, 'mountPath': e.mountPath} + dep['spec']['template']['spec']['containers'][1]['volumeMounts'].append(ta) + + for e in v: + vo = {'name': e.name} + if e.emptyDir: + vo['emptyDir'] = {} + if e.secret: + s = e.secret + vo['secret'] = 
{'secretName': s.secretName, 'defaultMode': s.defaultMode} + if e.configMap: + cm = e.configMap + vo['configMap'] = {'name': cm.name, 'defaultMode': cm.defaultMode} + dep['spec']['template']['spec']['volumes'].append(vo) + + sidecar_arg = o['spec']['template']['spec']['containers'][0]['args'][0] + + # replace RMQ address in sidecar_args + to_replace = sidecar_arg[sidecar_arg.index('amqp://')+7: sidecar_arg.index(':5672')] + sidecar_arg = sidecar_arg.replace(to_replace, f'{rmq_user}:{rmq_pass}@{rmq_host}') + dep['spec']['template']['spec']['containers'][0]['args'] = [sidecar_arg] + + print('=========================') + # print(dep) + return dep + + def patch_master(self): + print('patch ingress if needed.') + print('patch configmap so it ADVERTIZES external URL.') + + +class sXlite(cluster): + def __init__(self) -> None: + context = os.environ.get('SXLITE_CONTEXT') + namespace = os.environ.get('SXLITE_NAMESPACE') + if not context or not namespace: + sys.exit(1) + super().__init__(context) + self.ns = namespace + self.hpa_api = self.client.resources.get( + api_version=f"autoscaling/{hpa_version}", kind="HorizontalPodAutoscaler") + + def create_secret(self, secret): + print(f'creating secret: {secret.metadata.name}') + try: + secret = self.secret_api.create(body=secret, namespace=self.ns) + print(f'created secret: {secret.metadata.name}') + except exceptions.ConflictError: + print(f'conflict creating secret: {secret.metadata.name}') + + def delete_secret(self, name): + print(f'deleting secret: {name}') + try: + self.secret_api.delete(body={}, name=name, namespace=self.ns) + print(f'deleted secret: {name}') + except exceptions.NotFoundError as e: + print('could not delete resource:', e.summary()) + + def create_configmap(self, cm): + print(f'creating configmap: {cm.metadata.name}') + try: + cm = self.cm_api.create(body=cm, namespace=self.ns) + print(f'created configmap: {cm.metadata.name}') + except exceptions.ConflictError: + print(f'conflict creating configmap: 
{cm.metadata.name}') + + def delete_configmap(self, name): + print(f'deleting configmap: {name}') + try: + self.cm_api.delete(body={}, name=name, namespace=self.ns) + print(f'deleted configmap: {name}') + except Exception as e: + print(f'could not delete configmap:{name}', e) + + def create_deployment(self, dep): + dep['spec']['replicas'] = initial_pods + + ea = os.getenv("EXTRA_ANNOTATIONS", "") + if ea: + ea_key = ea.split(":")[0].strip() + ea_val = ea.split(":")[1].strip() + dep['spec']['template']['metadata']['annotations'][ea_key] = ea_val + + site = {"name": "site", "value": os.environ.get('SXLITE_CONTEXT')} + dep['spec']['template']['spec']['containers'][0]['env'].append(site) + dep['spec']['template']['spec']['containers'][1]['env'].append(site) + + print(f'creating deployment: {dep}') + + try: + dep = self.deployment_api.create(body=dep, namespace=self.ns) + print(f'created deployment: {dep.metadata.name}') + except exceptions.ConflictError as e: + print('conflict creating deployment:', e.summary()) + + def delete_deployment(self, name): + print(f'deleting deployment: {name}') + try: + self.deployment_api.delete(body={}, name=name, namespace=self.ns) + print(f'deleted deployment: {name}') + except Exception as e: + print(f'could not delete deployment:{name}', e) + + def create_hpa(self, name): + print(f'creating hpa: {name}') + hpa = { + "apiVersion": f"autoscaling/{hpa_version}", + "kind": "HorizontalPodAutoscaler", + "metadata": { + "name": name, + "namespace": self.ns, + }, + "spec": { + "scaleTargetRef": { + "kind": "Deployment", + "name": f"transformer-{name}", + "apiVersion": "apps/v1" + }, + "minReplicas": initial_pods, + "maxReplicas": max_pods, + "metrics": [ + { + "type": "Resource", + "resource": { + "name": "cpu", + "target": { + "type": "Utilization", + "averageUtilization": hpa_cpu_utilization + } + } + } + + ] + } + } + try: + self.hpa_api.create(body=hpa, namespace=self.ns) + print(f'created hpa: {name}') + except exceptions.ConflictError: 
+ print(f'conflict creating hpa: {name}') + + def delete_hpa(self, name): + print(f'deleting HPA: {name}') + try: + self.hpa_api.delete(body={}, name=name, namespace=self.ns) + print(f'deleted hpa: {name}') + except Exception as e: + print(f'could not delete hpa:{name}', e) + + +if __name__ == '__main__': + + def cleanup(): + sxl.delete_secret('grid-certs-secret') + sxl.delete_secret('servicex-secrets') + sxl.delete_secret('servicex-x509-proxy') + + def start(): + sec = sxo.read_secret('grid-certs-secret') + sxl.create_secret(sec) + + sec = sxo.read_secret('servicex-secrets') + sxl.create_secret(sec) + global rmq_pass + rmq_pass = base64.b64decode(sec.data['rabbitmq-password']).decode() + + sec = sxo.read_secret('servicex-x509-proxy') + sxl.create_secret(sec) + + sxo = sXorigin() + sxl = sXlite() + + sxo.patch_master() + cleanup() + start() + + count = 0 + while True: + sxo.update_requests() + for req_id in requests['new']: + d = sxo.get_deployment(req_id) + sxl.create_deployment(d) + cm = sxo.read_configmap(f'{req_id}-generated-source') + sxl.create_configmap(cm) + sxl.create_hpa(req_id) + requests['active'].append(req_id) + + for req_id in requests['unknown']: + sxl.delete_hpa(req_id) + sxl.delete_configmap(f'{req_id}-generated-source') + sxl.delete_deployment(f'transformer-{req_id}') + + for req_id in requests['active']: + print(f'req_id: {req_id} still active.') + + count += 1 + if not count % 720 and len(requests['active']) == 0: # replace secrets + cleanup() + start() + + time.sleep(5)