diff --git a/.claude/skills/native-trigger/SKILL.md b/.claude/skills/native-trigger/SKILL.md new file mode 100644 index 0000000000000..5952179d6a96e --- /dev/null +++ b/.claude/skills/native-trigger/SKILL.md @@ -0,0 +1,756 @@ +# Skill: Adding Native Trigger Services + +This skill provides comprehensive guidance for adding new native trigger services to Windmill. Native triggers allow external services (like Nextcloud, Google Drive, etc.) to trigger Windmill scripts/flows via webhooks or push notifications. + +## Architecture Overview + +The native trigger system consists of: + +1. **Database Layer** - PostgreSQL tables and enum types +2. **Backend Rust Implementation** - Core trait, handlers, and service modules in the `windmill-native-triggers` crate +3. **Frontend Svelte Components** - Configuration forms and UI components + +### Key Files + +| Component | Path | +|-----------|------| +| Core module with `External` trait | `backend/windmill-native-triggers/src/lib.rs` | +| Generic CRUD handlers | `backend/windmill-native-triggers/src/handler.rs` | +| Background sync logic | `backend/windmill-native-triggers/src/sync.rs` | +| OAuth/workspace integration | `backend/windmill-native-triggers/src/workspace_integrations.rs` | +| Re-export shim (windmill-api) | `backend/windmill-api/src/native_triggers/mod.rs` | +| TriggerKind enum | `backend/windmill-common/src/triggers.rs` | +| JobTriggerKind enum | `backend/windmill-common/src/jobs.rs` | +| Frontend service registry | `frontend/src/lib/components/triggers/native/utils.ts` | +| Frontend trigger utilities | `frontend/src/lib/components/triggers/utils.ts` | +| Trigger badges (icons + counts) | `frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte` | +| Workspace integrations UI | `frontend/src/lib/components/workspaceSettings/WorkspaceIntegrations.svelte` | +| OAuth config form component | `frontend/src/lib/components/workspaceSettings/OAuthClientConfig.svelte` | +| OpenAPI spec | `backend/windmill-api/openapi.yaml` | +| Reference: Nextcloud module | `backend/windmill-native-triggers/src/nextcloud/` | +| Reference: Google module | `backend/windmill-native-triggers/src/google/` | + +### Crate Structure + +The native trigger code lives in the `windmill-native-triggers` crate (`backend/windmill-native-triggers/`). The `windmill-api` crate re-exports everything via a shim: + +```rust +// backend/windmill-api/src/native_triggers/mod.rs +pub use windmill_native_triggers::*; +``` + +All new service modules go in `backend/windmill-native-triggers/src/`. 
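+
+For callers that still go through `windmill-api`, the shim keeps both import paths pointing at the same items. A tiny illustrative sketch (assuming the consuming crate depends on both crates; the `as _` alias only avoids a name clash in this example):
+
+```rust
+// Both imports resolve to the same trait, because windmill-api
+// re-exports the entire crate via `pub use windmill_native_triggers::*;`.
+use windmill_native_triggers::External;           // canonical definition
+use windmill_api::native_triggers::External as _; // same item through the shim
+```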
+
+---
+
+## Core Concepts
+
+### The `External` Trait
+
+Every native trigger service implements the `External` trait defined in `lib.rs`:
+
+```rust
+#[async_trait]
+pub trait External: Send + Sync + 'static {
+    // Associated types:
+    type ServiceConfig: Debug + DeserializeOwned + Serialize + Send + Sync;
+    type TriggerData: Debug + Serialize + Send + Sync;
+    type OAuthData: DeserializeOwned + Serialize + Clone + Send + Sync;
+    type CreateResponse: DeserializeOwned + Send + Sync;
+
+    // Constants:
+    const SUPPORT_WEBHOOK: bool;
+    const SERVICE_NAME: ServiceName;
+    const DISPLAY_NAME: &'static str;
+    const TOKEN_ENDPOINT: &'static str;
+    const REFRESH_ENDPOINT: &'static str;
+    const AUTH_ENDPOINT: &'static str;
+
+    // Required methods (parameter types elided for brevity):
+    async fn create(&self, w_id, oauth_data, webhook_token, data, db, tx) -> Result<Self::CreateResponse>;
+    async fn update(&self, w_id, oauth_data, external_id, webhook_token, data, db, tx) -> Result<serde_json::Value>;
+    async fn get(&self, w_id, oauth_data, external_id, db, tx) -> Result<Self::TriggerData>;
+    async fn delete(&self, w_id, oauth_data, external_id, db, tx) -> Result<()>;
+    async fn exists(&self, w_id, oauth_data, external_id, db, tx) -> Result<bool>;
+    async fn maintain_triggers(&self, db, workspace_id, triggers, oauth_data, synced, errors);
+    fn external_id_and_metadata_from_response(&self, resp) -> (String, Option<serde_json::Value>);
+
+    // Methods with defaults:
+    async fn prepare_webhook(&self, db, w_id, headers, body, script_path, is_flow) -> Result<PushArgsOwned>;
+    fn service_config_from_create_response(&self, data, resp) -> Option<serde_json::Value>;
+    fn additional_routes(&self) -> axum::Router;
+    async fn http_client_request(&self, url, method, workspace_id, tx, db, headers, body) -> Result<R>;
+}
+```
+
+Key design points:
+- **`update()` returns `serde_json::Value`** - the resolved service_config to store. Each service is responsible for building the final config.
+- **`maintain_triggers()`** - periodic background maintenance. Each service implements its own strategy (Nextcloud: reconcile with external state; Google: renew expiring channels).
+- **No `list_all()` in the trait** - services that need it (Nextcloud) implement it privately; services that don't (Google) use different maintenance strategies.
+- **No `get_external_id_from_trigger_data()` or `extract_service_config_from_trigger_data()`** - removed in favor of the `maintain_triggers` pattern.
+
+### Create Lifecycle: Two Paths
+
+The `create_native_trigger` handler in `handler.rs` supports two creation flows, controlled by `service_config_from_create_response()`:
+
+**Path A: Short (Google pattern)** - `service_config_from_create_response()` returns `Some(config)`:
+1. `create()` registers on external service
+2. `external_id_and_metadata_from_response()` extracts the ID
+3. `service_config_from_create_response()` builds the config directly from input data + response metadata
+4. Stores trigger in DB -- done, no extra round-trip
+
+Use this when the external_id is known before the create call (e.g., Google generates the channel_id as a UUID upfront and includes it in the webhook URL).
+
+**Path B: Long (Nextcloud pattern)** - `service_config_from_create_response()` returns `None` (default):
+1. `create()` registers on external service (webhook URL has no external_id yet)
+2. `external_id_and_metadata_from_response()` extracts the ID
+3. `update()` is called to correct the webhook URL with the now-known external_id
+4. `update()` returns the resolved service_config
+5. Stores trigger in DB
+
+Use this when the external_id is assigned by the remote service and the webhook URL needs to be corrected after creation.
+
+### OAuth Token Storage (Three-Table Pattern)
+
+OAuth tokens are stored across three tables, NOT in `workspace_integrations.oauth_data` directly:
+
+| Table | What's Stored |
+|-------|---------------|
+| `workspace_integrations` | `oauth_data` JSON with `base_url`, `client_id`, `client_secret`, `instance_shared` flag; `resource_path` pointing to the variable |
+| `variable` | Encrypted `access_token` (at the path stored in `resource_path`), linked to `account` via `account` column |
+| `account` | `refresh_token`, keyed by `workspace_id` + `client` (service name) + `is_workspace_integration = true` |
+
+The `decrypt_oauth_data()` function in `lib.rs` assembles these into a unified struct:
+```rust
+pub struct OAuthConfig {
+    pub base_url: String,
+    pub access_token: String,          // decrypted from variable
+    pub refresh_token: Option<String>, // from account table
+    pub client_id: String,             // from oauth_data or instance settings
+    pub client_secret: String,         // from oauth_data or instance settings
+}
+```
+
+Instance-level sharing: when `oauth_data.instance_shared == true`, `client_id` and `client_secret` are read from global settings instead of workspace_integrations.
+
+### URL Resolution
+
+The `resolve_endpoint()` helper handles both absolute and relative OAuth URLs:
+
+```rust
+pub fn resolve_endpoint(base_url: &str, endpoint: &str) -> String {
+    if endpoint.starts_with("http://") || endpoint.starts_with("https://") {
+        endpoint.to_string() // Google: absolute URLs
+    } else {
+        format!("{}{}", base_url, endpoint) // Nextcloud: relative paths
+    }
+}
+```
+
+### ServiceName Methods
+
+`ServiceName` is the central registry enum. Each variant must implement these match arms:
+
+| Method | Purpose |
+|--------|---------|
+| `as_str()` | Lowercase identifier (e.g., `"google"`) |
+| `as_trigger_kind()` | Maps to `TriggerKind` enum |
+| `as_job_trigger_kind()` | Maps to `JobTriggerKind` enum |
+| `token_endpoint()` | OAuth token endpoint (relative or absolute) |
+| `auth_endpoint()` | OAuth authorization endpoint |
+| `oauth_scopes()` | Space-separated OAuth scopes |
+| `resource_type()` | Resource type for token storage (e.g., `"gworkspace"`) |
+| `extra_auth_params()` | Extra OAuth params (e.g., Google needs `access_type=offline`, `prompt=consent`) |
+| `integration_service()` | Maps to the workspace integration service (usually `*self`) |
+| `TryFrom` | Parse from string |
+| `Display` | Delegates to `as_str()` |
+
+---
+
+## Step-by-Step Implementation Guide
+
+### Step 1: Database Migration
+
+Create a new migration file: `backend/migrations/YYYYMMDDHHMMSS_newservice_trigger.up.sql`
+
+```sql
+-- Add the service to the native_trigger_service enum
+ALTER TYPE native_trigger_service ADD VALUE IF NOT EXISTS 'newservice';
+
+-- Add to TRIGGER_KIND enum (used for trigger tracking)
+ALTER TYPE TRIGGER_KIND ADD VALUE IF NOT EXISTS 'newservice';
+
+-- Add to job_trigger_kind enum (used for job tracking)
+ALTER TYPE job_trigger_kind ADD VALUE IF NOT EXISTS 'newservice';
+```
+
+Also create the corresponding down migration.
+
+### Step 2: Update windmill-common Enums
+
+#### `backend/windmill-common/src/triggers.rs`
+
+Add a variant to the `TriggerKind` enum, and update the `to_key()` and `fmt()` implementations.
+
+#### `backend/windmill-common/src/jobs.rs`
+
+Add a variant to the `JobTriggerKind` enum and update the `Display` implementation, as sketched below.
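+
+A hedged sketch of what Step 2 usually amounts to (the variant name, the `to_key()` signature, and the match-arm bodies are illustrative -- mirror the existing variants in each file):
+
+```rust
+// backend/windmill-common/src/triggers.rs (sketch)
+pub enum TriggerKind {
+    // ... existing variants ...
+    NewService, // <-- new variant
+}
+
+impl TriggerKind {
+    pub fn to_key(&self) -> String {
+        match self {
+            // ... existing arms ...
+            TriggerKind::NewService => "newservice".to_string(),
+        }
+    }
+}
+
+// backend/windmill-common/src/jobs.rs (sketch)
+impl std::fmt::Display for JobTriggerKind {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            // ... existing arms ...
+            JobTriggerKind::NewService => write!(f, "newservice"),
+        }
+    }
+}
+```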
+
+### Step 3: Backend Service Module
+
+Create a new directory: `backend/windmill-native-triggers/src/newservice/`
+
+#### `mod.rs` - Type Definitions
+
+```rust
+use serde::{Deserialize, Serialize};
+
+pub mod external;
+// pub mod routes; // Only if you need additional service-specific routes
+
+/// OAuth data deserialized from the three-table pattern.
+/// The actual structure is built by decrypt_oauth_data() from variable + account + workspace_integrations.
+#[derive(Debug, Clone, Deserialize, Serialize)]
+pub struct NewServiceOAuthData {
+    pub base_url: String,              // from workspace_integrations.oauth_data
+    pub access_token: String,          // decrypted from variable table
+    pub refresh_token: Option<String>, // from account table
+    // Note: client_id and client_secret are in OAuthConfig, not here
+    // unless the service needs them at runtime for API calls
+}
+
+/// Configuration provided by user when creating/updating a trigger.
+/// Stored as JSON in native_trigger.service_config.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NewServiceConfig {
+    // Service-specific configuration fields
+    pub folder_path: String,
+    pub file_filter: Option<String>,
+}
+
+/// Data retrieved from the external service about a trigger.
+/// Returned by the get() method and shown in the UI.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NewServiceTriggerData {
+    pub folder_path: String,
+    pub file_filter: Option<String>,
+    // Fields that shouldn't affect service_config comparison should use #[serde(skip_serializing)]
+}
+
+/// Response from external service when creating a trigger/webhook.
+#[derive(Debug, Deserialize)]
+pub struct CreateTriggerResponse {
+    pub id: String,
+}
+
+/// Handler struct (stateless, used for routing)
+#[derive(Copy, Clone)]
+pub struct NewService;
+```
+
+#### `external.rs` - External Trait Implementation
+
+```rust
+use async_trait::async_trait;
+use reqwest::Method;
+use sqlx::PgConnection;
+use std::collections::HashMap;
+use windmill_common::{
+    error::{Error, Result},
+    BASE_URL, DB,
+};
+
+use crate::{
+    generate_webhook_service_url, External, NativeTrigger, NativeTriggerData, ServiceName,
+    sync::{SyncError, TriggerSyncInfo},
+};
+use super::{NewService, NewServiceConfig, NewServiceOAuthData, NewServiceTriggerData, CreateTriggerResponse};
+
+#[async_trait]
+impl External for NewService {
+    type ServiceConfig = NewServiceConfig;
+    type TriggerData = NewServiceTriggerData;
+    type OAuthData = NewServiceOAuthData;
+    type CreateResponse = CreateTriggerResponse;
+
+    const SERVICE_NAME: ServiceName = ServiceName::NewService;
+    const DISPLAY_NAME: &'static str = "New Service";
+    const SUPPORT_WEBHOOK: bool = true;
+    const TOKEN_ENDPOINT: &'static str = "/oauth/token";
+    const REFRESH_ENDPOINT: &'static str = "/oauth/token";
+    const AUTH_ENDPOINT: &'static str = "/oauth/authorize";
+
+    async fn create(
+        &self,
+        w_id: &str,
+        oauth_data: &Self::OAuthData,
+        webhook_token: &str,
+        data: &NativeTriggerData,
+        db: &DB,
+        tx: &mut PgConnection,
+    ) -> Result<Self::CreateResponse> {
+        let base_url = &*BASE_URL.read().await;
+
+        // external_id is None during create (we get it from the response)
+        let webhook_url = generate_webhook_service_url(
+            base_url, w_id, &data.script_path, data.is_flow,
+            None, Self::SERVICE_NAME, webhook_token,
+        );
+
+        let url = format!("{}/api/webhooks/create", oauth_data.base_url);
+        let payload = serde_json::json!({
+            "callback_url": webhook_url,
+            "folder_path": data.service_config.folder_path,
+        });
+
+        let response: CreateTriggerResponse = self
+            .http_client_request(&url, Method::POST, w_id, tx, db, None, Some(&payload))
+            .await?;
+
+        Ok(response)
+    }
+
+    /// Update returns the resolved service_config as JSON.
+    /// For services using the update+get pattern, call self.get() and serialize.
+    async fn update(
+        &self,
+        w_id: &str,
+        oauth_data: &Self::OAuthData,
+        external_id: &str,
+        webhook_token: &str,
+        data: &NativeTriggerData,
+        db: &DB,
+        tx: &mut PgConnection,
+    ) -> Result<serde_json::Value> {
+        let base_url = &*BASE_URL.read().await;
+
+        let webhook_url = generate_webhook_service_url(
+            base_url, w_id, &data.script_path, data.is_flow,
+            Some(external_id), Self::SERVICE_NAME, webhook_token,
+        );
+
+        let url = format!("{}/api/webhooks/{}", oauth_data.base_url, external_id);
+        let payload = serde_json::json!({
+            "callback_url": webhook_url,
+            "folder_path": data.service_config.folder_path,
+        });
+
+        let _: serde_json::Value = self
+            .http_client_request(&url, Method::PUT, w_id, tx, db, None, Some(&payload))
+            .await?;
+
+        // Fetch back the updated state to get the resolved config
+        let trigger_data = self.get(w_id, oauth_data, external_id, db, tx).await?;
+        serde_json::to_value(&trigger_data)
+            .map_err(|e| Error::InternalErr(format!("Failed to serialize trigger data: {}", e)))
+    }
+
+    async fn get(
+        &self,
+        w_id: &str,
+        oauth_data: &Self::OAuthData,
+        external_id: &str,
+        db: &DB,
+        tx: &mut PgConnection,
+    ) -> Result<Self::TriggerData> {
+        let url = format!("{}/api/webhooks/{}", oauth_data.base_url, external_id);
+        self.http_client_request::<_, ()>(&url, Method::GET, w_id, tx, db, None, None).await
+    }
+
+    async fn delete(
+        &self,
+        w_id: &str,
+        oauth_data: &Self::OAuthData,
+        external_id: &str,
+        db: &DB,
+        tx: &mut PgConnection,
+    ) -> Result<()> {
+        let url = format!("{}/api/webhooks/{}", oauth_data.base_url, external_id);
+        let _: serde_json::Value = self
+            .http_client_request::<_, ()>(&url, Method::DELETE, w_id, tx, db, None, None)
+            .await
+            .or_else(|e| match &e {
+                Error::InternalErr(msg) if msg.contains("404") => Ok(serde_json::Value::Null),
+                _ => Err(e),
+            })?;
+        Ok(())
+    }
+
+    async fn exists(
+        &self,
+        w_id: &str,
+        oauth_data: &Self::OAuthData,
+        external_id: &str,
+        db: &DB,
+        tx: &mut PgConnection,
+    ) -> Result<bool> {
+        match self.get(w_id, oauth_data, external_id, db, tx).await {
+            Ok(_) => Ok(true),
+            Err(Error::NotFound(_)) => Ok(false),
+            Err(e) => Err(e),
+        }
+    }
+
+    /// Background maintenance. Choose the right pattern for your service:
+    /// - For services with queryable external state: use reconcile_with_external_state()
+    /// - For channel-based services with expiration: implement renewal logic
+    async fn maintain_triggers(
+        &self,
+        db: &DB,
+        workspace_id: &str,
+        triggers: &[NativeTrigger],
+        oauth_data: &Self::OAuthData,
+        synced: &mut Vec<TriggerSyncInfo>,
+        errors: &mut Vec<SyncError>,
+    ) {
+        // Option A: Reconcile with external state (Nextcloud pattern)
+        // Fetch all triggers from external service and compare with DB
+        let external_triggers = match self.list_all(workspace_id, oauth_data, db).await {
+            Ok(triggers) => triggers,
+            Err(e) => {
+                errors.push(SyncError {
+                    resource_path: format!("workspace:{}", workspace_id),
+                    error_message: format!("Failed to list triggers: {}", e),
+                    error_type: "api_error".to_string(),
+                });
+                return;
+            }
+        };
+
+        // Convert to (external_id, config_json) pairs
+        let external_pairs: Vec<(String, serde_json::Value)> = external_triggers
+            .into_iter()
+            .map(|t| (t.id.clone(), serde_json::to_value(&t).unwrap_or_default()))
+            .collect();
+
+        crate::sync::reconcile_with_external_state(
+            db, workspace_id, Self::SERVICE_NAME, triggers, &external_pairs, synced, errors,
+        ).await;
+    }
+
+    fn external_id_and_metadata_from_response(
+        &self,
+        resp: &Self::CreateResponse,
+    ) -> (String, Option<serde_json::Value>) {
+        (resp.id.clone(), None)
+    }
+
+    // service_config_from_create_response: NOT overridden (returns None).
+    // This means the handler uses the update+get pattern after create.
+    // Override and return Some(...) to skip the update+get cycle (Google pattern).
+}
+
+impl NewService {
+    /// Private helper to list all triggers from the external service.
+    async fn list_all(
+        &self,
+        w_id: &str,
+        oauth_data: &<Self as External>::OAuthData,
+        db: &DB,
+    ) -> Result<Vec<<Self as External>::TriggerData>> {
+        // Implementation depends on the external service's API
+        todo!()
+    }
+}
+```
+
+### Step 4: Update lib.rs Registry
+
+In `backend/windmill-native-triggers/src/lib.rs`:
+
+```rust
+// Service modules - add new services here:
+#[cfg(feature = "native_trigger")]
+pub mod newservice; // <-- Add this
+
+// ServiceName enum - add variant:
+pub enum ServiceName {
+    Nextcloud,
+    Google,
+    NewService, // <-- Add this
+}
+
+// Then add match arms in ALL ServiceName methods:
+// as_str(), as_trigger_kind(), as_job_trigger_kind(), token_endpoint(),
+// auth_endpoint(), oauth_scopes(), resource_type(), extra_auth_params(),
+// integration_service(), TryFrom, Display
+```
+
+### Step 5: Update handler.rs Routes
+
+In `backend/windmill-native-triggers/src/handler.rs`:
+
+```rust
+pub fn generate_native_trigger_routers() -> Router {
+    // ...
+    #[cfg(feature = "native_trigger")]
+    {
+        use crate::newservice::NewService;
+        return router
+            .nest("/nextcloud", service_routes(NextCloud))
+            .nest("/google", service_routes(Google))
+            .nest("/newservice", service_routes(NewService)); // <-- Add this
+    }
+    // ...
+}
+```
+
+### Step 6: Update sync.rs
+
+In `backend/windmill-native-triggers/src/sync.rs`:
+
+```rust
+pub async fn sync_all_triggers(db: &DB) -> Result {
+    // ...
+    #[cfg(feature = "native_trigger")]
+    {
+        use crate::newservice::NewService;
+
+        // ... existing service syncs ...
+
+        // New service sync
+        let (service_name, result) = sync_service_triggers(db, NewService).await;
+        total_synced += result.synced_triggers.len();
+        total_errors += result.errors.len();
+        service_results.insert(service_name, result);
+    }
+    // ...
+}
+```
+
+### Step 7: Frontend Service Registry
+
+In `frontend/src/lib/components/triggers/native/utils.ts`:
+
+Add to `NATIVE_TRIGGER_SERVICES`, `getTriggerIconName()`, and `getServiceIcon()`.
+
+### Step 8: Frontend Trigger Form Component
+
+Create: `frontend/src/lib/components/triggers/native/services/newservice/NewServiceTriggerForm.svelte`
+
+### Step 9: Frontend Icon Component
+
+Create: `frontend/src/lib/components/icons/NewServiceIcon.svelte`
+
+### Step 10: Update NativeTriggerEditor
+
+Check `frontend/src/lib/components/triggers/native/NativeTriggerEditor.svelte` to ensure it dynamically loads form components based on service name.
+
+### Step 11: Workspace Integration UI
+
+Add your service to the `supportedServices` map in `frontend/src/lib/components/workspaceSettings/WorkspaceIntegrations.svelte`:
+
+```typescript
+const supportedServices: Record<string, any> = {
+  // ... existing services ...
+  newservice: {
+    name: 'newservice',
+    displayName: 'New Service',
+    description: 'Connect to New Service for triggers',
+    icon: NewServiceIcon,
+    docsUrl: 'https://www.windmill.dev/docs/integrations/newservice',
+    requiresBaseUrl: false, // false for cloud services, true for self-hosted
+    setupInstructions: [
+      'Step 1: Create an OAuth app on the service',
+      'Step 2: Configure the redirect URI shown below',
+      'Step 3: Enter the client credentials below'
+    ]
+  }
+}
+```
+
+### Step 12: Update `frontend/src/lib/components/triggers/utils.ts`
+
+Update ALL of these maps/functions:
+1. `triggerIconMap` - import and add icon
+2. `triggerDisplayNamesMap` - add display name
+3. `triggerTypeOrder` in `sortTriggers()` - add type
+4. `getLightConfig()` - add case for your service
+5. `getTriggerLabel()` - add case for your service
+6. `jobTriggerKinds` - add to array
+7. `countPropertyMap` - add count property
+8. `triggerSaveFunctions` - add save function
+
+### Step 13: Update TriggersBadge Component
+
+In `frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte`:
+
+1. Import the icon
+2. Add to `baseConfig` with `countKey` (the dynamic `availableNativeServices` loop does NOT set `countKey`)
+3. Add to the `allTypes` array
+
+### Step 14: Update OpenAPI Spec and Regenerate Types
+
+Add to `JobTriggerKind` enum in `backend/windmill-api/openapi.yaml`, then:
+
+```bash
+cd frontend && npm run generate-backend-client
+```
+
+---
+
+## Special Patterns
+
+### Unified Service with `trigger_type` (Google Pattern)
+
+When a single service handles multiple trigger types (e.g., Google Drive and Calendar, which share one OAuth flow and similar APIs), use a single `ServiceName` variant with a discriminator field:
+
+```rust
+pub enum GoogleTriggerType { Drive, Calendar }
+
+pub struct GoogleServiceConfig {
+    pub trigger_type: GoogleTriggerType,
+    // Drive-specific fields (only used when trigger_type = Drive)
+    pub resource_id: Option<String>,
+    pub resource_name: Option<String>,
+    // Calendar-specific fields (only used when trigger_type = Calendar)
+    pub calendar_id: Option<String>,
+    pub calendar_name: Option<String>,
+    // Metadata set after creation
+    pub google_resource_id: Option<String>,
+    pub expiration: Option<String>,
+}
+```
+
+Branch in trait methods based on `trigger_type`, as sketched below. Frontend uses a `ToggleButtonGroup` to switch between types. This keeps the codebase simpler (one service, one OAuth flow, one set of routes).
+
+See `backend/windmill-native-triggers/src/google/` for the reference implementation.
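+
+A minimal sketch of that branching inside a trait method such as `create()` (assumes the deserialized config is in scope; the watch URLs follow the public Google API shapes, but the exact construction lives in `google/external.rs`):
+
+```rust
+// Dispatch on the discriminator to pick the right external API call.
+let url = match config.trigger_type {
+    GoogleTriggerType::Drive => format!(
+        "https://www.googleapis.com/drive/v3/files/{}/watch",
+        config.resource_id.as_deref().unwrap_or_default()
+    ),
+    GoogleTriggerType::Calendar => format!(
+        "https://www.googleapis.com/calendar/v3/calendars/{}/events/watch",
+        config.calendar_id.as_deref().unwrap_or_default()
+    ),
+};
+```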
+
+### Skipping update+get After Create (Google Pattern)
+
+Override `service_config_from_create_response()` to return `Some(config)` when the external_id is known before the create call:
+
+```rust
+fn service_config_from_create_response(
+    &self,
+    data: &NativeTriggerData,
+    resp: &Self::CreateResponse,
+) -> Option<serde_json::Value> {
+    // Clone input config, add metadata from response
+    let mut config = data.service_config.clone();
+    config.google_resource_id = Some(resp.resource_id.clone());
+    config.expiration = Some(resp.expiration.clone());
+    Some(serde_json::to_value(&config).unwrap())
+}
+```
+
+### Services with Absolute OAuth Endpoints (Google)
+
+Unlike self-hosted services where OAuth endpoints are relative paths appended to `base_url`, services like Google have absolute URLs:
+
+```rust
+// Nextcloud: relative paths
+ServiceName::Nextcloud => "/apps/oauth2/api/v1/token",
+// Google: absolute URLs
+ServiceName::Google => "https://oauth2.googleapis.com/token",
+```
+
+The `resolve_endpoint()` function handles both. For services with absolute endpoints:
+- `base_url` can be empty
+- `requiresBaseUrl: false` in the frontend workspace integration config
+- Add `extra_auth_params()` if needed (Google requires `access_type=offline` and `prompt=consent`)
+
+### Channel-Based Push Notifications with Renewal (Google Pattern)
+
+For services using expiring watch channels instead of persistent webhooks:
+
+1. Store expiration in `service_config` (as part of `ServiceConfig`)
+2. In `maintain_triggers()`, implement renewal logic instead of using `reconcile_with_external_state()`:
+   ```rust
+   async fn maintain_triggers(&self, db, workspace_id, triggers, oauth_data, synced, errors) {
+       for trigger in triggers {
+           if should_renew_channel(trigger) {
+               self.renew_channel(db, trigger, oauth_data).await;
+           }
+       }
+   }
+   ```
+3. Renewal: best-effort stop of the old channel, create a new one with the same external_id, update service_config with the new expiration
+4. Google example: Drive channels expire in 24h (renew when <1h left), Calendar channels expire in 7 days (renew when <1 day left)
+
+### reconcile_with_external_state (Nextcloud Pattern)
+
+The reusable function in `sync.rs` compares external triggers with DB state:
+- Triggers missing externally: sets the error "Trigger no longer exists on external service"
+- Triggers present externally: clears errors, updates service_config if it differs
+
+Usage in `maintain_triggers()`:
+```rust
+let external_pairs: Vec<(String, serde_json::Value)> = /* fetch from external */;
+crate::sync::reconcile_with_external_state(
+    db, workspace_id, Self::SERVICE_NAME, triggers, &external_pairs, synced, errors,
+).await;
+```
+
+### Webhook Payload Processing
+
+Override `prepare_webhook()` to parse service-specific payloads into script/flow args:
+
+```rust
+async fn prepare_webhook(&self, db, w_id, headers, body, script_path, is_flow) -> Result<PushArgsOwned> {
+    let mut args = HashMap::new();
+    args.insert("event_type".to_string(), Box::new(headers.get("x-event-type").cloned()) as _);
+    args.insert("payload".to_string(), Box::new(serde_json::from_str::<serde_json::Value>(&body)?) as _);
+    Ok(PushArgsOwned { extra: None, args })
+}
+```
+
+Then register in `prepare_native_trigger_args()` in `lib.rs`:
+```rust
+pub async fn prepare_native_trigger_args(service_name, db, w_id, headers, body) -> Result<Option<PushArgsOwned>> {
+    match service_name {
+        ServiceName::Google => { /* ... */ Ok(Some(args)) }
+        ServiceName::NewService => { /* ...
*/ Ok(Some(args)) } + ServiceName::Nextcloud => Ok(None), // Uses default body parsing + } +} +``` + +### Instance-Level OAuth Credentials + +When `workspace_integrations.oauth_data.instance_shared == true`, `decrypt_oauth_data()` reads `client_id` and `client_secret` from instance-level global settings instead of workspace-level. This allows admins to share OAuth app credentials across workspaces. + +The frontend handles this via the `generate_instance_connect_url` endpoint in `workspace_integrations.rs`. + +--- + +## Testing Checklist + +- [ ] Database migration runs successfully +- [ ] `cargo check -p windmill-native-triggers --features native_trigger` passes +- [ ] `npx svelte-check --threshold error` passes (in frontend/) +- [ ] Service appears in workspace integrations list +- [ ] OAuth flow completes successfully +- [ ] Can create a new trigger +- [ ] Can view trigger details +- [ ] Can update trigger configuration +- [ ] Can delete trigger +- [ ] Webhook receives and processes payloads +- [ ] Background sync works correctly (reconciliation or channel renewal) +- [ ] Error handling works (expired tokens, service unavailable) + +--- + +## Reference Implementations + +### Nextcloud (Self-Hosted, Update+Get Pattern) + +| File | Purpose | +|------|---------| +| `nextcloud/mod.rs` | Types: NextCloudOAuthData, NextcloudServiceConfig, NextCloudTriggerData | +| `nextcloud/external.rs` | External trait: uses update+get pattern, reconcile_with_external_state for sync | +| `nextcloud/routes.rs` | Additional route: `GET /events` | + +Key patterns: relative OAuth endpoints, base_url required, list_all + reconcile for sync, update returns JSON from get(). + +### Google (Cloud, Unified Service, Short Create) + +| File | Purpose | +|------|---------| +| `google/mod.rs` | Types: GoogleServiceConfig with trigger_type discriminator, GoogleTriggerType enum | +| `google/external.rs` | External trait: overrides service_config_from_create_response, channel renewal for sync | +| `google/routes.rs` | Additional routes: `GET /calendars`, `GET /drive/files`, `GET /drive/shared_drives` | + +Key patterns: absolute OAuth endpoints, empty base_url, trigger_type for Drive/Calendar, expiring watch channels with renewal, service_config_from_create_response skips update+get, get() reconstructs data from stored service_config (no external "get channel" API). diff --git a/README.md b/README.md index 1c8010db84697..91ad7e31ec7d2 100644 --- a/README.md +++ b/README.md @@ -257,6 +257,8 @@ On self-hosted instances, you might want to import all the approved resource typ | BASE_URL | http://localhost:8000 | The base url that is exposed publicly to access your instance. Is overriden by the instance settings if any. | Server | | ZOMBIE_JOB_TIMEOUT | 30 | The timeout after which a job is considered to be zombie if the worker did not send pings about processing the job (every server check for zombie jobs every 30s) | Server | | RESTART_ZOMBIE_JOBS | true | If true then a zombie job is restarted (in-place with the same uuid and some logs), if false the zombie job is failed | Server | +| NATIVE_MODE | false | Enable native mode: sets NUM_WORKERS=8, rejects non-native jobs (nativets, postgresql, mysql, etc.) | Worker | +| I_ACK_NUM_WORKERS_IS_UNSAFE | false | Acknowledge running with NUM_WORKERS > 1 without native mode (required to bypass safety check) | Worker | | SLEEP_QUEUE | 50 | The number of ms to sleep in between the last check for new jobs in the DB. 
It is multiplied by NUM_WORKERS such that in average, for one worker instance, there is one pull every SLEEP_QUEUE ms. | Worker | | KEEP_JOB_DIR | false | Keep the job directory after the job is done. Useful for debugging. | Worker | | LICENSE_KEY (EE only) | None | License key checked at startup for the Enterprise Edition of Windmill | Worker | diff --git a/backend/.sqlx/query-5368683c19f8d6744d5dbc53e5b2ab0f2348646d79f5306c6868e2c3a8f389ee.json b/backend/.sqlx/query-0010ef26da16facd1c2c832601ac687c4c27de46a90f45496b8446af1a9d0578.json similarity index 55% rename from backend/.sqlx/query-5368683c19f8d6744d5dbc53e5b2ab0f2348646d79f5306c6868e2c3a8f389ee.json rename to backend/.sqlx/query-0010ef26da16facd1c2c832601ac687c4c27de46a90f45496b8446af1a9d0578.json index ebf1df39a35b9..59f5aeb848bf1 100644 --- a/backend/.sqlx/query-5368683c19f8d6744d5dbc53e5b2ab0f2348646d79f5306c6868e2c3a8f389ee.json +++ b/backend/.sqlx/query-0010ef26da16facd1c2c832601ac687c4c27de46a90f45496b8446af1a9d0578.json @@ -1,11 +1,11 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n oauth_data as \"oauth_data!: sqlx::types::Json\",\n service_name as \"service_name!: ServiceName\"\n FROM\n workspace_integrations\n WHERE\n workspace_id = $1\n ", + "query": "\n SELECT\n oauth_data as \"oauth_data: sqlx::types::Json\",\n service_name as \"service_name!: ServiceName\",\n resource_path\n FROM\n workspace_integrations\n WHERE\n workspace_id = $1\n ", "describe": { "columns": [ { "ordinal": 0, - "name": "oauth_data!: sqlx::types::Json", + "name": "oauth_data: sqlx::types::Json", "type_info": "Jsonb" }, { @@ -22,6 +22,11 @@ } } } + }, + { + "ordinal": 2, + "name": "resource_path", + "type_info": "Text" } ], "parameters": { @@ -30,9 +35,10 @@ ] }, "nullable": [ + true, false, - false + true ] }, - "hash": "5368683c19f8d6744d5dbc53e5b2ab0f2348646d79f5306c6868e2c3a8f389ee" + "hash": "0010ef26da16facd1c2c832601ac687c4c27de46a90f45496b8446af1a9d0578" } diff --git a/backend/.sqlx/query-05e05a9b979941c7a11cd881da652f459e4a0444d63a96deba4a879fbe1124ff.json b/backend/.sqlx/query-05e05a9b979941c7a11cd881da652f459e4a0444d63a96deba4a879fbe1124ff.json new file mode 100644 index 0000000000000..9511913ffeee6 --- /dev/null +++ b/backend/.sqlx/query-05e05a9b979941c7a11cd881da652f459e4a0444d63a96deba4a879fbe1124ff.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM resource WHERE workspace_id = $1 AND path = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "05e05a9b979941c7a11cd881da652f459e4a0444d63a96deba4a879fbe1124ff" +} diff --git a/backend/.sqlx/query-3b3f60623126626b52ca0a4a188655ddf728cd3f21ee308db7393694ccc5c7b3.json b/backend/.sqlx/query-1af48c42255f1c973b4a9c9a58050bf5ec1ee6f93f0a90c1c7d0c0fcd816702d.json similarity index 58% rename from backend/.sqlx/query-3b3f60623126626b52ca0a4a188655ddf728cd3f21ee308db7393694ccc5c7b3.json rename to backend/.sqlx/query-1af48c42255f1c973b4a9c9a58050bf5ec1ee6f93f0a90c1c7d0c0fcd816702d.json index 0df550b57c987..b32d1e51d3b02 100644 --- a/backend/.sqlx/query-3b3f60623126626b52ca0a4a188655ddf728cd3f21ee308db7393694ccc5c7b3.json +++ b/backend/.sqlx/query-1af48c42255f1c973b4a9c9a58050bf5ec1ee6f93f0a90c1c7d0c0fcd816702d.json @@ -1,11 +1,10 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE workspace_integrations\n SET oauth_data = $1, updated_at = now()\n WHERE workspace_id = $2 AND service_name = $3\n ", + "query": "DELETE FROM native_trigger WHERE workspace_id = $1 AND service_name = $2", "describe": { 
"columns": [], "parameters": { "Left": [ - "Jsonb", "Text", { "Custom": { @@ -22,5 +21,5 @@ }, "nullable": [] }, - "hash": "3b3f60623126626b52ca0a4a188655ddf728cd3f21ee308db7393694ccc5c7b3" + "hash": "1af48c42255f1c973b4a9c9a58050bf5ec1ee6f93f0a90c1c7d0c0fcd816702d" } diff --git a/backend/.sqlx/query-e26ccc6607a9c78c1a8c1fd7b3bec931cf0ed27f79f852ae7f63a0ed6e12042f.json b/backend/.sqlx/query-1ba2e23d4ba816048ec1e88af9e342867fc0443cabea16d111afa2b91d3fe03b.json similarity index 75% rename from backend/.sqlx/query-e26ccc6607a9c78c1a8c1fd7b3bec931cf0ed27f79f852ae7f63a0ed6e12042f.json rename to backend/.sqlx/query-1ba2e23d4ba816048ec1e88af9e342867fc0443cabea16d111afa2b91d3fe03b.json index 53a30348d5926..3552ba7a79e27 100644 --- a/backend/.sqlx/query-e26ccc6607a9c78c1a8c1fd7b3bec931cf0ed27f79f852ae7f63a0ed6e12042f.json +++ b/backend/.sqlx/query-1ba2e23d4ba816048ec1e88af9e342867fc0443cabea16d111afa2b91d3fe03b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT client, refresh_token, grant_type, cc_client_id, cc_client_secret, cc_token_url, mcp_server_url FROM account WHERE workspace_id = $1 AND id = $2", + "query": "SELECT client, refresh_token, grant_type, cc_client_id, cc_client_secret, cc_token_url, mcp_server_url, is_workspace_integration FROM account WHERE workspace_id = $1 AND id = $2", "describe": { "columns": [ { @@ -37,6 +37,11 @@ "ordinal": 6, "name": "mcp_server_url", "type_info": "Text" + }, + { + "ordinal": 7, + "name": "is_workspace_integration", + "type_info": "Bool" } ], "parameters": { @@ -52,8 +57,9 @@ true, true, true, - true + true, + false ] }, - "hash": "e26ccc6607a9c78c1a8c1fd7b3bec931cf0ed27f79f852ae7f63a0ed6e12042f" + "hash": "1ba2e23d4ba816048ec1e88af9e342867fc0443cabea16d111afa2b91d3fe03b" } diff --git a/backend/.sqlx/query-26beff5e94b68703ad81ef9dd2d08869eb3bb7659efd9bac04cdf98ae963063d.json b/backend/.sqlx/query-26beff5e94b68703ad81ef9dd2d08869eb3bb7659efd9bac04cdf98ae963063d.json new file mode 100644 index 0000000000000..b8c8f832d314d --- /dev/null +++ b/backend/.sqlx/query-26beff5e94b68703ad81ef9dd2d08869eb3bb7659efd9bac04cdf98ae963063d.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO account (workspace_id, client, expires_at, refresh_token, is_workspace_integration)\n VALUES ($1, $2, now() + ($3 || ' seconds')::interval, $4, true)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Text", + "Varchar" + ] + }, + "nullable": [ + false + ] + }, + "hash": "26beff5e94b68703ad81ef9dd2d08869eb3bb7659efd9bac04cdf98ae963063d" +} diff --git a/backend/.sqlx/query-27065225c6affd26f1533dacffe1c38321511b5a7dd2a7e9435c04868188fd44.json b/backend/.sqlx/query-27065225c6affd26f1533dacffe1c38321511b5a7dd2a7e9435c04868188fd44.json new file mode 100644 index 0000000000000..f9232bbc381ef --- /dev/null +++ b/backend/.sqlx/query-27065225c6affd26f1533dacffe1c38321511b5a7dd2a7e9435c04868188fd44.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM account WHERE workspace_id = $1 AND client = $2 AND is_workspace_integration = true RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "27065225c6affd26f1533dacffe1c38321511b5a7dd2a7e9435c04868188fd44" +} diff --git 
a/backend/.sqlx/query-2e5dd992b0bfd7550d6f4cb5424a1c14352527b98249bce286790641bf56491e.json b/backend/.sqlx/query-2e5dd992b0bfd7550d6f4cb5424a1c14352527b98249bce286790641bf56491e.json new file mode 100644 index 0000000000000..14fe90b617bd5 --- /dev/null +++ b/backend/.sqlx/query-2e5dd992b0bfd7550d6f4cb5424a1c14352527b98249bce286790641bf56491e.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT count(*) FROM native_trigger WHERE workspace_id = 'test-workspace' AND service_name = 'google'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "2e5dd992b0bfd7550d6f4cb5424a1c14352527b98249bce286790641bf56491e" +} diff --git a/backend/.sqlx/query-3db01d63ceefbb91389ed83f0a9c3e152b555b2f2e94fc3c1ff0953254fbd75e.json b/backend/.sqlx/query-3db01d63ceefbb91389ed83f0a9c3e152b555b2f2e94fc3c1ff0953254fbd75e.json new file mode 100644 index 0000000000000..8fe4e4197fc88 --- /dev/null +++ b/backend/.sqlx/query-3db01d63ceefbb91389ed83f0a9c3e152b555b2f2e94fc3c1ff0953254fbd75e.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT EXISTS(\n SELECT 1 FROM native_trigger\n WHERE external_id = $1 AND service_name = $2 AND workspace_id = $3\n )\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text", + { + "Custom": { + "name": "native_trigger_service", + "kind": { + "Enum": [ + "nextcloud", + "google" + ] + } + } + }, + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "3db01d63ceefbb91389ed83f0a9c3e152b555b2f2e94fc3c1ff0953254fbd75e" +} diff --git a/backend/.sqlx/query-4be53f0b801ebc1a33a184556fd138fdec8082f31f56d7023cf8c6311964f3b0.json b/backend/.sqlx/query-4be53f0b801ebc1a33a184556fd138fdec8082f31f56d7023cf8c6311964f3b0.json new file mode 100644 index 0000000000000..21b45e254a153 --- /dev/null +++ b/backend/.sqlx/query-4be53f0b801ebc1a33a184556fd138fdec8082f31f56d7023cf8c6311964f3b0.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO global_settings (name, value) VALUES ('oauths', $1)\n ON CONFLICT (name) DO UPDATE SET value = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "4be53f0b801ebc1a33a184556fd138fdec8082f31f56d7023cf8c6311964f3b0" +} diff --git a/backend/.sqlx/query-5a219a2532517869578c4504ff3153c43903f929ae5d62fbba12610f89c36d55.json b/backend/.sqlx/query-5a219a2532517869578c4504ff3153c43903f929ae5d62fbba12610f89c36d55.json index 36ddb8ab9fd94..713ccb9dd35d9 100644 --- a/backend/.sqlx/query-5a219a2532517869578c4504ff3153c43903f929ae5d62fbba12610f89c36d55.json +++ b/backend/.sqlx/query-5a219a2532517869578c4504ff3153c43903f929ae5d62fbba12610f89c36d55.json @@ -15,7 +15,7 @@ ] }, "nullable": [ - true + null ] }, "hash": "5a219a2532517869578c4504ff3153c43903f929ae5d62fbba12610f89c36d55" diff --git a/backend/.sqlx/query-607c13333627e79557d3b6f6f68eee0a5dbe7cd4643e4bf99a592eb1bb82580c.json b/backend/.sqlx/query-607c13333627e79557d3b6f6f68eee0a5dbe7cd4643e4bf99a592eb1bb82580c.json new file mode 100644 index 0000000000000..b797ef2418623 --- /dev/null +++ b/backend/.sqlx/query-607c13333627e79557d3b6f6f68eee0a5dbe7cd4643e4bf99a592eb1bb82580c.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT count(*) FROM variable WHERE workspace_id = 'test-workspace' AND path = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + 
"type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "607c13333627e79557d3b6f6f68eee0a5dbe7cd4643e4bf99a592eb1bb82580c" +} diff --git a/backend/.sqlx/query-6868520d496afe306bbd93293076ea4bb155097d1e8d3ffe5b75dd80ced735de.json b/backend/.sqlx/query-6868520d496afe306bbd93293076ea4bb155097d1e8d3ffe5b75dd80ced735de.json new file mode 100644 index 0000000000000..769c83d5f0197 --- /dev/null +++ b/backend/.sqlx/query-6868520d496afe306bbd93293076ea4bb155097d1e8d3ffe5b75dd80ced735de.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO resource (workspace_id, path, value, resource_type, description, created_by)\n VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (workspace_id, path) DO UPDATE\n SET value = EXCLUDED.value, resource_type = EXCLUDED.resource_type", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Jsonb", + "Varchar", + "Text", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "6868520d496afe306bbd93293076ea4bb155097d1e8d3ffe5b75dd80ced735de" +} diff --git a/backend/.sqlx/query-6b4d48527af6f1411dc5e03f9144fb127488a79ac53a154d71253628320b1084.json b/backend/.sqlx/query-6b4d48527af6f1411dc5e03f9144fb127488a79ac53a154d71253628320b1084.json new file mode 100644 index 0000000000000..72f8c30c11bfa --- /dev/null +++ b/backend/.sqlx/query-6b4d48527af6f1411dc5e03f9144fb127488a79ac53a154d71253628320b1084.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT refresh_token FROM account WHERE workspace_id = $1 AND id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "refresh_token", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text", + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "6b4d48527af6f1411dc5e03f9144fb127488a79ac53a154d71253628320b1084" +} diff --git a/backend/.sqlx/query-6c2872eb025eecbfd7459d7a387545940d94ed5bce49ccbc2b8ff838c89919d6.json b/backend/.sqlx/query-6c2872eb025eecbfd7459d7a387545940d94ed5bce49ccbc2b8ff838c89919d6.json new file mode 100644 index 0000000000000..9a268a1f123c6 --- /dev/null +++ b/backend/.sqlx/query-6c2872eb025eecbfd7459d7a387545940d94ed5bce49ccbc2b8ff838c89919d6.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT oauth_data FROM workspace_integrations WHERE workspace_id = $1 AND service_name::text = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "oauth_data", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "6c2872eb025eecbfd7459d7a387545940d94ed5bce49ccbc2b8ff838c89919d6" +} diff --git a/backend/.sqlx/query-3e5bdc2e071fc2f1e3c7971736272f20bb5a0aa921a614bd02898d3f162660c2.json b/backend/.sqlx/query-7443ba1f922e190bb9a0ca313847f8d27c35a6ee3aff20157d6285e73aa923ef.json similarity index 75% rename from backend/.sqlx/query-3e5bdc2e071fc2f1e3c7971736272f20bb5a0aa921a614bd02898d3f162660c2.json rename to backend/.sqlx/query-7443ba1f922e190bb9a0ca313847f8d27c35a6ee3aff20157d6285e73aa923ef.json index fdcb10be1c7e5..7043ac9d89bfd 100644 --- a/backend/.sqlx/query-3e5bdc2e071fc2f1e3c7971736272f20bb5a0aa921a614bd02898d3f162660c2.json +++ b/backend/.sqlx/query-7443ba1f922e190bb9a0ca313847f8d27c35a6ee3aff20157d6285e73aa923ef.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n workspace_id,\n service_name AS \"service_name!: ServiceName\",\n oauth_data,\n created_at,\n updated_at,\n created_by\n FROM\n workspace_integrations\n WHERE\n 
workspace_id = $1\n AND service_name = $2\n ", + "query": "\n SELECT\n workspace_id,\n service_name AS \"service_name!: ServiceName\",\n oauth_data,\n resource_path,\n created_at,\n updated_at,\n created_by\n FROM\n workspace_integrations\n WHERE\n workspace_id = $1\n AND service_name = $2\n ", "describe": { "columns": [ { @@ -30,16 +30,21 @@ }, { "ordinal": 3, + "name": "resource_path", + "type_info": "Text" + }, + { + "ordinal": 4, "name": "created_at", "type_info": "Timestamptz" }, { - "ordinal": 4, + "ordinal": 5, "name": "updated_at", "type_info": "Timestamptz" }, { - "ordinal": 5, + "ordinal": 6, "name": "created_by", "type_info": "Varchar" } @@ -63,11 +68,12 @@ "nullable": [ false, false, - false, + true, + true, false, false, false ] }, - "hash": "3e5bdc2e071fc2f1e3c7971736272f20bb5a0aa921a614bd02898d3f162660c2" + "hash": "7443ba1f922e190bb9a0ca313847f8d27c35a6ee3aff20157d6285e73aa923ef" } diff --git a/backend/.sqlx/query-7a57f58e809e482a599722d3887fb7e115506ff1e5ec9cf6dd2af84ed9a78632.json b/backend/.sqlx/query-7a57f58e809e482a599722d3887fb7e115506ff1e5ec9cf6dd2af84ed9a78632.json new file mode 100644 index 0000000000000..e5df81b3c4974 --- /dev/null +++ b/backend/.sqlx/query-7a57f58e809e482a599722d3887fb7e115506ff1e5ec9cf6dd2af84ed9a78632.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO account (workspace_id, client, expires_at, refresh_token, is_workspace_integration)\n VALUES ('test-workspace', $1, now() + interval '1 hour', $2, true)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Varchar" + ] + }, + "nullable": [ + false + ] + }, + "hash": "7a57f58e809e482a599722d3887fb7e115506ff1e5ec9cf6dd2af84ed9a78632" +} diff --git a/backend/.sqlx/query-95c57fb921a2e3725b92cbafac6e3dc360b88429f03dd1e2b1b55cfabe208cb7.json b/backend/.sqlx/query-823fc5f998fe747ec8537752d9eb7ef548b2fd9ee1f5380084f27796c2bcc8ad.json similarity index 68% rename from backend/.sqlx/query-95c57fb921a2e3725b92cbafac6e3dc360b88429f03dd1e2b1b55cfabe208cb7.json rename to backend/.sqlx/query-823fc5f998fe747ec8537752d9eb7ef548b2fd9ee1f5380084f27796c2bcc8ad.json index a189bc4da26c9..4cd2634618ce1 100644 --- a/backend/.sqlx/query-95c57fb921a2e3725b92cbafac6e3dc360b88429f03dd1e2b1b55cfabe208cb7.json +++ b/backend/.sqlx/query-823fc5f998fe747ec8537752d9eb7ef548b2fd9ee1f5380084f27796c2bcc8ad.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT EXISTS (\n SELECT 1\n FROM workspace_integrations\n WHERE workspace_id = $1\n AND service_name = $2\n AND oauth_data IS NOT NULL\n )\n ", + "query": "\n SELECT EXISTS (\n SELECT 1\n FROM workspace_integrations wi\n WHERE wi.workspace_id = $1\n AND wi.service_name = $2\n AND wi.oauth_data IS NOT NULL\n )\n ", "describe": { "columns": [ { @@ -29,5 +29,5 @@ null ] }, - "hash": "95c57fb921a2e3725b92cbafac6e3dc360b88429f03dd1e2b1b55cfabe208cb7" + "hash": "823fc5f998fe747ec8537752d9eb7ef548b2fd9ee1f5380084f27796c2bcc8ad" } diff --git a/backend/.sqlx/query-826a4216830f6a930c382209a20bc7f8b460064480e080b989e31df3d6a30e31.json b/backend/.sqlx/query-826a4216830f6a930c382209a20bc7f8b460064480e080b989e31df3d6a30e31.json new file mode 100644 index 0000000000000..3493ebe065a83 --- /dev/null +++ b/backend/.sqlx/query-826a4216830f6a930c382209a20bc7f8b460064480e080b989e31df3d6a30e31.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM variable WHERE workspace_id = $1 AND account = ANY($2)", + "describe": { + "columns": [], + 
"parameters": { + "Left": [ + "Text", + "Int4Array" + ] + }, + "nullable": [] + }, + "hash": "826a4216830f6a930c382209a20bc7f8b460064480e080b989e31df3d6a30e31" +} diff --git a/backend/.sqlx/query-83d6e371ca84903e9f487afc065353a9f7be86ff752612909587ec3cb770cb75.json b/backend/.sqlx/query-83d6e371ca84903e9f487afc065353a9f7be86ff752612909587ec3cb770cb75.json new file mode 100644 index 0000000000000..d6240c6b3ae55 --- /dev/null +++ b/backend/.sqlx/query-83d6e371ca84903e9f487afc065353a9f7be86ff752612909587ec3cb770cb75.json @@ -0,0 +1,39 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT external_id, webhook_token_prefix FROM native_trigger WHERE workspace_id = $1 AND service_name = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "external_id", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "webhook_token_prefix", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text", + { + "Custom": { + "name": "native_trigger_service", + "kind": { + "Enum": [ + "nextcloud", + "google" + ] + } + } + } + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "83d6e371ca84903e9f487afc065353a9f7be86ff752612909587ec3cb770cb75" +} diff --git a/backend/.sqlx/query-86f80fd909ef7e956a2e0806aaad728a74fae33c5c1836102fe518cc62faffef.json b/backend/.sqlx/query-86f80fd909ef7e956a2e0806aaad728a74fae33c5c1836102fe518cc62faffef.json new file mode 100644 index 0000000000000..ce89cc8839259 --- /dev/null +++ b/backend/.sqlx/query-86f80fd909ef7e956a2e0806aaad728a74fae33c5c1836102fe518cc62faffef.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE account SET refresh_token = $1, refresh_error = NULL\n WHERE workspace_id = $2 AND client = $3 AND is_workspace_integration = true", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "86f80fd909ef7e956a2e0806aaad728a74fae33c5c1836102fe518cc62faffef" +} diff --git a/backend/.sqlx/query-89791ad1a0862b6475ebfdeb54b0101e124fcf9d12e93d84b44457c72c7604a5.json b/backend/.sqlx/query-89791ad1a0862b6475ebfdeb54b0101e124fcf9d12e93d84b44457c72c7604a5.json new file mode 100644 index 0000000000000..ca3059dbd8bdb --- /dev/null +++ b/backend/.sqlx/query-89791ad1a0862b6475ebfdeb54b0101e124fcf9d12e93d84b44457c72c7604a5.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE workspace_integrations SET resource_path = REGEXP_REPLACE(resource_path, 'u/' || $2 || '/(.*)', 'u/' || $1 || '/\\1') WHERE resource_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "89791ad1a0862b6475ebfdeb54b0101e124fcf9d12e93d84b44457c72c7604a5" +} diff --git a/backend/.sqlx/query-8a4d42e373043bc509985ab320894bf3e6afa7b2019cc4739baac9165f7ead9e.json b/backend/.sqlx/query-8a4d42e373043bc509985ab320894bf3e6afa7b2019cc4739baac9165f7ead9e.json new file mode 100644 index 0000000000000..db5d9df3c7b5d --- /dev/null +++ b/backend/.sqlx/query-8a4d42e373043bc509985ab320894bf3e6afa7b2019cc4739baac9165f7ead9e.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT service_config\n FROM native_trigger\n WHERE external_id = $1 AND service_name = $2 AND workspace_id = $3\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "service_config", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Text", + { + "Custom": { + "name": "native_trigger_service", + "kind": { + "Enum": [ + "nextcloud", + 
"google" + ] + } + } + }, + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "8a4d42e373043bc509985ab320894bf3e6afa7b2019cc4739baac9165f7ead9e" +} diff --git a/backend/.sqlx/query-8e5881225f4bf7243bd40397ac8b8708fb6ce6c0ba0d263bb7eeb404f5dd62ff.json b/backend/.sqlx/query-8e5881225f4bf7243bd40397ac8b8708fb6ce6c0ba0d263bb7eeb404f5dd62ff.json new file mode 100644 index 0000000000000..d1d9fe86aa08d --- /dev/null +++ b/backend/.sqlx/query-8e5881225f4bf7243bd40397ac8b8708fb6ce6c0ba0d263bb7eeb404f5dd62ff.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE account SET refresh_token = $1 WHERE workspace_id = 'test-workspace' AND id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "8e5881225f4bf7243bd40397ac8b8708fb6ce6c0ba0d263bb7eeb404f5dd62ff" +} diff --git a/backend/.sqlx/query-8f33846f6c25a78267a5c8143b414f033280ebf57b7b9568dc2fe31bc625020d.json b/backend/.sqlx/query-8f33846f6c25a78267a5c8143b414f033280ebf57b7b9568dc2fe31bc625020d.json new file mode 100644 index 0000000000000..179c39eed6077 --- /dev/null +++ b/backend/.sqlx/query-8f33846f6c25a78267a5c8143b414f033280ebf57b7b9568dc2fe31bc625020d.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM resource WHERE workspace_id = $1 AND resource_type = $2 AND path LIKE 'u/%/native_%'", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "8f33846f6c25a78267a5c8143b414f033280ebf57b7b9568dc2fe31bc625020d" +} diff --git a/backend/.sqlx/query-9052b7cd438ff029a37bd489190d98d365acec09f2f102b7de71dcc9d356900e.json b/backend/.sqlx/query-9052b7cd438ff029a37bd489190d98d365acec09f2f102b7de71dcc9d356900e.json index 62f8d3100fd3e..413ae29c0f9b5 100644 --- a/backend/.sqlx/query-9052b7cd438ff029a37bd489190d98d365acec09f2f102b7de71dcc9d356900e.json +++ b/backend/.sqlx/query-9052b7cd438ff029a37bd489190d98d365acec09f2f102b7de71dcc9d356900e.json @@ -26,7 +26,7 @@ ] }, "nullable": [ - false + true ] }, "hash": "9052b7cd438ff029a37bd489190d98d365acec09f2f102b7de71dcc9d356900e" diff --git a/backend/.sqlx/query-9242b5a866d0dd489bebe4284413d37202be70068affca92c45d112e0210538a.json b/backend/.sqlx/query-9242b5a866d0dd489bebe4284413d37202be70068affca92c45d112e0210538a.json new file mode 100644 index 0000000000000..a812a80963ca5 --- /dev/null +++ b/backend/.sqlx/query-9242b5a866d0dd489bebe4284413d37202be70068affca92c45d112e0210538a.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT count(*) FROM resource WHERE workspace_id = 'test-workspace' AND path = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "9242b5a866d0dd489bebe4284413d37202be70068affca92c45d112e0210538a" +} diff --git a/backend/.sqlx/query-993e514229a1d508a44ef07b4399f9697cdefaa1f45ca665f72bc6fcf2797c7e.json b/backend/.sqlx/query-993e514229a1d508a44ef07b4399f9697cdefaa1f45ca665f72bc6fcf2797c7e.json new file mode 100644 index 0000000000000..9b6ad19a0966a --- /dev/null +++ b/backend/.sqlx/query-993e514229a1d508a44ef07b4399f9697cdefaa1f45ca665f72bc6fcf2797c7e.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT resource_path FROM workspace_integrations WHERE workspace_id = $1 AND service_name = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "resource_path", + "type_info": "Text" + } + ], + "parameters": { + "Left": 
[ + "Text", + { + "Custom": { + "name": "native_trigger_service", + "kind": { + "Enum": [ + "nextcloud", + "google" + ] + } + } + } + ] + }, + "nullable": [ + true + ] + }, + "hash": "993e514229a1d508a44ef07b4399f9697cdefaa1f45ca665f72bc6fcf2797c7e" +} diff --git a/backend/.sqlx/query-a264bbd8dbabb03854bd25350a7aeda0704770eb200bae635f1933eece90c9d6.json b/backend/.sqlx/query-a264bbd8dbabb03854bd25350a7aeda0704770eb200bae635f1933eece90c9d6.json deleted file mode 100644 index 10309651350f1..0000000000000 --- a/backend/.sqlx/query-a264bbd8dbabb03854bd25350a7aeda0704770eb200bae635f1933eece90c9d6.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "SELECT EXISTS(SELECT 1 FROM app WHERE path = 'g/all/setup_app')", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "exists", - "type_info": "Bool" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - null - ] - }, - "hash": "a264bbd8dbabb03854bd25350a7aeda0704770eb200bae635f1933eece90c9d6" -} diff --git a/backend/.sqlx/query-2602402bedcdbc45cdc0d64a76ce075c6ae51404037b0a7d4d33faf6a7d6a6d8.json b/backend/.sqlx/query-a588f4caa014008b50eccd09122b09f8a098e58791893bacaaf2ff67a30c031c.json similarity index 54% rename from backend/.sqlx/query-2602402bedcdbc45cdc0d64a76ce075c6ae51404037b0a7d4d33faf6a7d6a6d8.json rename to backend/.sqlx/query-a588f4caa014008b50eccd09122b09f8a098e58791893bacaaf2ff67a30c031c.json index 5a4cd8922451d..74c9e111bf6ff 100644 --- a/backend/.sqlx/query-2602402bedcdbc45cdc0d64a76ce075c6ae51404037b0a7d4d33faf6a7d6a6d8.json +++ b/backend/.sqlx/query-a588f4caa014008b50eccd09122b09f8a098e58791893bacaaf2ff67a30c031c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO workspace_integrations (\n workspace_id,\n service_name,\n oauth_data,\n created_by,\n created_at,\n updated_at\n ) VALUES (\n $1, $2, $3, $4, now(), now()\n )\n ON CONFLICT (workspace_id, service_name)\n DO UPDATE SET\n oauth_data = $3,\n updated_at = now()\n ", + "query": "\n INSERT INTO workspace_integrations (\n workspace_id,\n service_name,\n oauth_data,\n resource_path,\n created_by,\n created_at,\n updated_at\n ) VALUES (\n $1, $2, $3, $4, $5, now(), now()\n )\n ON CONFLICT (workspace_id, service_name)\n DO UPDATE SET\n oauth_data = $3,\n resource_path = $4,\n updated_at = now()\n ", "describe": { "columns": [], "parameters": { @@ -18,10 +18,11 @@ } }, "Jsonb", + "Text", "Varchar" ] }, "nullable": [] }, - "hash": "2602402bedcdbc45cdc0d64a76ce075c6ae51404037b0a7d4d33faf6a7d6a6d8" + "hash": "a588f4caa014008b50eccd09122b09f8a098e58791893bacaaf2ff67a30c031c" } diff --git a/backend/.sqlx/query-d14e982c3d74499ec4bc62118e0edf065799eec5cf16f439b5f7568f392e60c3.json b/backend/.sqlx/query-a6a25545af16db9f03552ce2ac178cfda3f2ced1b8f1e60bdfc7d84c642903d2.json similarity index 86% rename from backend/.sqlx/query-d14e982c3d74499ec4bc62118e0edf065799eec5cf16f439b5f7568f392e60c3.json rename to backend/.sqlx/query-a6a25545af16db9f03552ce2ac178cfda3f2ced1b8f1e60bdfc7d84c642903d2.json index c41f61c70d7e6..c7e53d1595a7f 100644 --- a/backend/.sqlx/query-d14e982c3d74499ec4bc62118e0edf065799eec5cf16f439b5f7568f392e60c3.json +++ b/backend/.sqlx/query-a6a25545af16db9f03552ce2ac178cfda3f2ced1b8f1e60bdfc7d84c642903d2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n EXISTS(SELECT 1 FROM websocket_trigger WHERE workspace_id = $1) AS \"websocket_used!\",\n EXISTS(SELECT 1 FROM http_trigger WHERE workspace_id = $1) AS \"http_routes_used!\",\n EXISTS(SELECT 1 FROM kafka_trigger WHERE workspace_id = $1) as 
\"kafka_used!\",\n EXISTS(SELECT 1 FROM nats_trigger WHERE workspace_id = $1) as \"nats_used!\",\n EXISTS(SELECT 1 FROM postgres_trigger WHERE workspace_id = $1) AS \"postgres_used!\",\n EXISTS(SELECT 1 FROM mqtt_trigger WHERE workspace_id = $1) AS \"mqtt_used!\",\n EXISTS(SELECT 1 FROM sqs_trigger WHERE workspace_id = $1) AS \"sqs_used!\",\n EXISTS(SELECT 1 FROM gcp_trigger WHERE workspace_id = $1) AS \"gcp_used!\",\n EXISTS(SELECT 1 FROM email_trigger WHERE workspace_id = $1) AS \"email_used!\",\n EXISTS(SELECT 1 FROM native_trigger WHERE workspace_id = $1 AND service_name = 'nextcloud'::native_trigger_service) AS \"nextcloud_used!\"\n ", + "query": "\n SELECT\n EXISTS(SELECT 1 FROM websocket_trigger WHERE workspace_id = $1) AS \"websocket_used!\",\n EXISTS(SELECT 1 FROM http_trigger WHERE workspace_id = $1) AS \"http_routes_used!\",\n EXISTS(SELECT 1 FROM kafka_trigger WHERE workspace_id = $1) as \"kafka_used!\",\n EXISTS(SELECT 1 FROM nats_trigger WHERE workspace_id = $1) as \"nats_used!\",\n EXISTS(SELECT 1 FROM postgres_trigger WHERE workspace_id = $1) AS \"postgres_used!\",\n EXISTS(SELECT 1 FROM mqtt_trigger WHERE workspace_id = $1) AS \"mqtt_used!\",\n EXISTS(SELECT 1 FROM sqs_trigger WHERE workspace_id = $1) AS \"sqs_used!\",\n EXISTS(SELECT 1 FROM gcp_trigger WHERE workspace_id = $1) AS \"gcp_used!\",\n EXISTS(SELECT 1 FROM email_trigger WHERE workspace_id = $1) AS \"email_used!\",\n EXISTS(SELECT 1 FROM native_trigger WHERE workspace_id = $1 AND service_name = 'nextcloud'::native_trigger_service) AS \"nextcloud_used!\",\n EXISTS(SELECT 1 FROM native_trigger WHERE workspace_id = $1 AND service_name = 'google'::native_trigger_service) AS \"google_used!\"\n ", "describe": { "columns": [ { @@ -52,6 +52,11 @@ "ordinal": 9, "name": "nextcloud_used!", "type_info": "Bool" + }, + { + "ordinal": 10, + "name": "google_used!", + "type_info": "Bool" } ], "parameters": { @@ -69,8 +74,9 @@ null, null, null, + null, null ] }, - "hash": "d14e982c3d74499ec4bc62118e0edf065799eec5cf16f439b5f7568f392e60c3" + "hash": "a6a25545af16db9f03552ce2ac178cfda3f2ced1b8f1e60bdfc7d84c642903d2" } diff --git a/backend/.sqlx/query-ab5720c0af66aba9fd7d6b842f098878c431d52bdd7b71355fc7192144b91300.json b/backend/.sqlx/query-ab5720c0af66aba9fd7d6b842f098878c431d52bdd7b71355fc7192144b91300.json new file mode 100644 index 0000000000000..730b882de99e8 --- /dev/null +++ b/backend/.sqlx/query-ab5720c0af66aba9fd7d6b842f098878c431d52bdd7b71355fc7192144b91300.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO variable (workspace_id, path, value, is_secret, description, account, is_oauth)\n VALUES ($1, $2, $3, true, $4, $5, true)\n ON CONFLICT (workspace_id, path) DO UPDATE\n SET value = EXCLUDED.value, account = EXCLUDED.account", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "ab5720c0af66aba9fd7d6b842f098878c431d52bdd7b71355fc7192144b91300" +} diff --git a/backend/.sqlx/query-bc1298e492d3008386d9b1b449a98156fe186832494d16a434921727e5d3314d.json b/backend/.sqlx/query-bc1298e492d3008386d9b1b449a98156fe186832494d16a434921727e5d3314d.json new file mode 100644 index 0000000000000..aff23a17e7582 --- /dev/null +++ b/backend/.sqlx/query-bc1298e492d3008386d9b1b449a98156fe186832494d16a434921727e5d3314d.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM native_trigger WHERE workspace_id = $1 AND script_path = $2 AND is_flow = $3 AND service_name = 'google'", + 
"describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Bool" + ] + }, + "nullable": [ + null + ] + }, + "hash": "bc1298e492d3008386d9b1b449a98156fe186832494d16a434921727e5d3314d" +} diff --git a/backend/.sqlx/query-c0aff25f0cc3b71842b0ba9ae55b6bc5eca203bf02f46164db08580d128b860a.json b/backend/.sqlx/query-c0aff25f0cc3b71842b0ba9ae55b6bc5eca203bf02f46164db08580d128b860a.json new file mode 100644 index 0000000000000..74a9d241359f6 --- /dev/null +++ b/backend/.sqlx/query-c0aff25f0cc3b71842b0ba9ae55b6bc5eca203bf02f46164db08580d128b860a.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT value, account FROM variable WHERE workspace_id = $1 AND path = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "value", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "account", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "c0aff25f0cc3b71842b0ba9ae55b6bc5eca203bf02f46164db08580d128b860a" +} diff --git a/backend/.sqlx/query-c1757ea525295ac9a0681be83a1f9d1e70944f65562e38c078b683e09cd9fb09.json b/backend/.sqlx/query-c1757ea525295ac9a0681be83a1f9d1e70944f65562e38c078b683e09cd9fb09.json new file mode 100644 index 0000000000000..9db742f9084a5 --- /dev/null +++ b/backend/.sqlx/query-c1757ea525295ac9a0681be83a1f9d1e70944f65562e38c078b683e09cd9fb09.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT count(*) FROM account WHERE workspace_id = 'test-workspace' AND id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int4" + ] + }, + "nullable": [ + null + ] + }, + "hash": "c1757ea525295ac9a0681be83a1f9d1e70944f65562e38c078b683e09cd9fb09" +} diff --git a/backend/.sqlx/query-c2bf1109d208d3aa989b2e12c0380f54638edc40788d7417a08d08a267426b5e.json b/backend/.sqlx/query-c2bf1109d208d3aa989b2e12c0380f54638edc40788d7417a08d08a267426b5e.json new file mode 100644 index 0000000000000..6427ce5f3a52a --- /dev/null +++ b/backend/.sqlx/query-c2bf1109d208d3aa989b2e12c0380f54638edc40788d7417a08d08a267426b5e.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO resource (workspace_id, path, value, resource_type, extra_perms, created_by)\n VALUES ('test-workspace', $1, $2, $3, '{}'::jsonb, 'test-user')", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Jsonb", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "c2bf1109d208d3aa989b2e12c0380f54638edc40788d7417a08d08a267426b5e" +} diff --git a/backend/.sqlx/query-d55b6f9be03dc4b74d937285bdf4ffa40e152e72a6fe11831ea0495324e471b4.json b/backend/.sqlx/query-d55b6f9be03dc4b74d937285bdf4ffa40e152e72a6fe11831ea0495324e471b4.json new file mode 100644 index 0000000000000..1a873eecc692f --- /dev/null +++ b/backend/.sqlx/query-d55b6f9be03dc4b74d937285bdf4ffa40e152e72a6fe11831ea0495324e471b4.json @@ -0,0 +1,97 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n external_id,\n workspace_id,\n service_name AS \"service_name!: ServiceName\",\n script_path,\n is_flow,\n webhook_token_prefix,\n service_config,\n error,\n created_at,\n updated_at\n FROM native_trigger\n WHERE external_id = $1 AND service_name = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "external_id", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "workspace_id", + "type_info": "Varchar" + }, + { + 
"ordinal": 2, + "name": "service_name!: ServiceName", + "type_info": { + "Custom": { + "name": "native_trigger_service", + "kind": { + "Enum": [ + "nextcloud", + "google" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "is_flow", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "webhook_token_prefix", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "service_config", + "type_info": "Jsonb" + }, + { + "ordinal": 7, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 8, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "updated_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text", + { + "Custom": { + "name": "native_trigger_service", + "kind": { + "Enum": [ + "nextcloud", + "google" + ] + } + } + } + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true, + false, + false + ] + }, + "hash": "d55b6f9be03dc4b74d937285bdf4ffa40e152e72a6fe11831ea0495324e471b4" +} diff --git a/backend/.sqlx/query-e241023a0d7b24adf7940ae764f14136b6d19fefbd8389e5ecd3bfc9bd652632.json b/backend/.sqlx/query-e241023a0d7b24adf7940ae764f14136b6d19fefbd8389e5ecd3bfc9bd652632.json new file mode 100644 index 0000000000000..b84513ca52b2b --- /dev/null +++ b/backend/.sqlx/query-e241023a0d7b24adf7940ae764f14136b6d19fefbd8389e5ecd3bfc9bd652632.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT oauth_data FROM workspace_integrations\n WHERE workspace_id = $1 AND service_name = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "oauth_data", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Text", + { + "Custom": { + "name": "native_trigger_service", + "kind": { + "Enum": [ + "nextcloud", + "google" + ] + } + } + } + ] + }, + "nullable": [ + true + ] + }, + "hash": "e241023a0d7b24adf7940ae764f14136b6d19fefbd8389e5ecd3bfc9bd652632" +} diff --git a/backend/.sqlx/query-e4c508a9bc69ccb4b32cf50caa97f9a2ff7c5990df953296d7227c1c81bc5130.json b/backend/.sqlx/query-e4c508a9bc69ccb4b32cf50caa97f9a2ff7c5990df953296d7227c1c81bc5130.json new file mode 100644 index 0000000000000..6a73fa7d80d18 --- /dev/null +++ b/backend/.sqlx/query-e4c508a9bc69ccb4b32cf50caa97f9a2ff7c5990df953296d7227c1c81bc5130.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE variable SET value = $1 WHERE workspace_id = 'test-workspace' AND path = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "e4c508a9bc69ccb4b32cf50caa97f9a2ff7c5990df953296d7227c1c81bc5130" +} diff --git a/backend/.sqlx/query-e80dc984cd1d2388cbf17206ad059137cf7f92d0222382af1a66de807f3138e8.json b/backend/.sqlx/query-e80dc984cd1d2388cbf17206ad059137cf7f92d0222382af1a66de807f3138e8.json new file mode 100644 index 0000000000000..108c82c2bd4cf --- /dev/null +++ b/backend/.sqlx/query-e80dc984cd1d2388cbf17206ad059137cf7f92d0222382af1a66de807f3138e8.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO variable (workspace_id, path, value, is_secret, description, account, is_oauth)\n VALUES ('test-workspace', $1, $2, true, 'test oauth token', $3, true)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "e80dc984cd1d2388cbf17206ad059137cf7f92d0222382af1a66de807f3138e8" +} diff --git 
a/backend/.sqlx/query-f5460eb13ea4f0e9896928a3266e419090f72e54f7347b35254a661116ba822d.json b/backend/.sqlx/query-f5460eb13ea4f0e9896928a3266e419090f72e54f7347b35254a661116ba822d.json new file mode 100644 index 0000000000000..77f8950a4b81b --- /dev/null +++ b/backend/.sqlx/query-f5460eb13ea4f0e9896928a3266e419090f72e54f7347b35254a661116ba822d.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE workspace_integrations SET resource_path = $1 WHERE workspace_id = $2 AND resource_path = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "f5460eb13ea4f0e9896928a3266e419090f72e54f7347b35254a661116ba822d" +} diff --git a/backend/Cargo.lock b/backend/Cargo.lock index aedbf137e4446..c949009de7793 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -16159,8 +16159,10 @@ dependencies = [ "sqlx", "tokio", "uuid", + "windmill-api-auth", "windmill-api-client", "windmill-common", + "windmill-native-triggers", "windmill-test-utils", ] diff --git a/backend/ee-repo-ref.txt b/backend/ee-repo-ref.txt index 0edac026f7de5..00b0a2f9dba9d 100644 --- a/backend/ee-repo-ref.txt +++ b/backend/ee-repo-ref.txt @@ -1 +1 @@ -e7f80bca9320580e1cb96b4f4ca9942649abce7f \ No newline at end of file +63cc9da5540d1c8e6068177f2010d2f35abce367 \ No newline at end of file diff --git a/backend/migrations/20260206113137_google_native_triggers.down.sql b/backend/migrations/20260206113137_google_native_triggers.down.sql new file mode 100644 index 0000000000000..b876835c2efdf --- /dev/null +++ b/backend/migrations/20260206113137_google_native_triggers.down.sql @@ -0,0 +1,11 @@ +-- Note: PostgreSQL does not support removing enum values directly. +-- This down migration is a placeholder. To fully reverse, you would need to: +-- 1. Create a new enum type without the values +-- 2. Update all columns to use the new type +-- 3. Drop the old type +-- 4. 
Rename the new type + +-- For now, we just document what was added and would need removing: +-- native_trigger_service: 'google' +-- TRIGGER_KIND: 'google' +-- job_trigger_kind: 'google' diff --git a/backend/migrations/20260206113137_google_native_triggers.up.sql b/backend/migrations/20260206113137_google_native_triggers.up.sql new file mode 100644 index 0000000000000..6a231aef0dc07 --- /dev/null +++ b/backend/migrations/20260206113137_google_native_triggers.up.sql @@ -0,0 +1,10 @@ +-- Add Google to native_trigger_service enum +-- 'google' is a unified service that handles both Drive and Calendar triggers +-- The trigger_type field in service_config determines which Google service is used +ALTER TYPE native_trigger_service ADD VALUE IF NOT EXISTS 'google'; + +-- Add to TRIGGER_KIND enum (used for trigger tracking) +ALTER TYPE TRIGGER_KIND ADD VALUE IF NOT EXISTS 'google'; + +-- Add to job_trigger_kind enum (used for job tracking) +ALTER TYPE job_trigger_kind ADD VALUE IF NOT EXISTS 'google'; diff --git a/backend/migrations/20260212100000_native_trigger_oauth_to_resource.down.sql b/backend/migrations/20260212100000_native_trigger_oauth_to_resource.down.sql new file mode 100644 index 0000000000000..22be42fd95511 --- /dev/null +++ b/backend/migrations/20260212100000_native_trigger_oauth_to_resource.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE account DROP COLUMN IF EXISTS is_workspace_integration; +ALTER TABLE workspace_integrations ALTER COLUMN oauth_data SET NOT NULL; +ALTER TABLE workspace_integrations DROP COLUMN IF EXISTS resource_path; \ No newline at end of file diff --git a/backend/migrations/20260212100000_native_trigger_oauth_to_resource.up.sql b/backend/migrations/20260212100000_native_trigger_oauth_to_resource.up.sql new file mode 100644 index 0000000000000..4c8bf4c2222fc --- /dev/null +++ b/backend/migrations/20260212100000_native_trigger_oauth_to_resource.up.sql @@ -0,0 +1,10 @@ +-- Migrate native trigger OAuth tokens from workspace_integrations to account+variable+resource pattern + +-- Add flag to distinguish workspace integration accounts from regular user OAuth accounts +ALTER TABLE account ADD COLUMN is_workspace_integration BOOLEAN NOT NULL DEFAULT false; + +-- Make oauth_data nullable since it will only store client config (no tokens) going forward +ALTER TABLE workspace_integrations ALTER COLUMN oauth_data DROP NOT NULL; + +-- Add resource_path column to workspace_integrations +ALTER TABLE workspace_integrations ADD COLUMN IF NOT EXISTS resource_path TEXT; diff --git a/backend/migrations/20260213000000_worker_ping_native_mode.down.sql b/backend/migrations/20260213000000_worker_ping_native_mode.down.sql new file mode 100644 index 0000000000000..77ebbd9daea55 --- /dev/null +++ b/backend/migrations/20260213000000_worker_ping_native_mode.down.sql @@ -0,0 +1 @@ +ALTER TABLE worker_ping DROP COLUMN IF EXISTS native_mode; diff --git a/backend/migrations/20260213000000_worker_ping_native_mode.up.sql b/backend/migrations/20260213000000_worker_ping_native_mode.up.sql new file mode 100644 index 0000000000000..9966cc654300e --- /dev/null +++ b/backend/migrations/20260213000000_worker_ping_native_mode.up.sql @@ -0,0 +1 @@ +ALTER TABLE worker_ping ADD COLUMN IF NOT EXISTS native_mode BOOLEAN NOT NULL DEFAULT false; diff --git a/backend/src/main.rs index 83fe5b7a23afa..e91bf8ce96362 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -41,16 +41,16 @@ use
windmill_common::{ CRITICAL_ERROR_CHANNELS_SETTING, CUSTOM_TAGS_SETTING, DEFAULT_TAGS_PER_WORKSPACE_SETTING, DEFAULT_TAGS_WORKSPACES_SETTING, EMAIL_DOMAIN_SETTING, ENV_SETTINGS, EXPOSE_DEBUG_METRICS_SETTING, EXPOSE_METRICS_SETTING, EXTRA_PIP_INDEX_URL_SETTING, - JOB_ISOLATION_SETTING, HUB_API_SECRET_SETTING, HUB_BASE_URL_SETTING, INDEXER_SETTING, - INSTANCE_PYTHON_VERSION_SETTING, JOB_DEFAULT_TIMEOUT_SECS_SETTING, JWT_SECRET_SETTING, - KEEP_JOB_DIR_SETTING, LICENSE_KEY_SETTING, MAVEN_REPOS_SETTING, + HUB_API_SECRET_SETTING, HUB_BASE_URL_SETTING, INDEXER_SETTING, + INSTANCE_PYTHON_VERSION_SETTING, JOB_DEFAULT_TIMEOUT_SECS_SETTING, JOB_ISOLATION_SETTING, + JWT_SECRET_SETTING, KEEP_JOB_DIR_SETTING, LICENSE_KEY_SETTING, MAVEN_REPOS_SETTING, MONITOR_LOGS_ON_OBJECT_STORE_SETTING, NO_DEFAULT_MAVEN_SETTING, NPM_CONFIG_REGISTRY_SETTING, NUGET_CONFIG_SETTING, OAUTH_SETTING, OTEL_SETTING, - OTEL_TRACING_PROXY_SETTING, PIP_INDEX_URL_SETTING, UV_INDEX_STRATEGY_SETTING, - POWERSHELL_REPO_PAT_SETTING, POWERSHELL_REPO_URL_SETTING, REQUEST_SIZE_LIMIT_SETTING, + OTEL_TRACING_PROXY_SETTING, PIP_INDEX_URL_SETTING, POWERSHELL_REPO_PAT_SETTING, + POWERSHELL_REPO_URL_SETTING, REQUEST_SIZE_LIMIT_SETTING, REQUIRE_PREEXISTING_USER_FOR_OAUTH_SETTING, RETENTION_PERIOD_SECS_SETTING, RUBY_REPOS_SETTING, SAML_METADATA_SETTING, SCIM_TOKEN_SETTING, SMTP_SETTING, TEAMS_SETTING, - TIMEOUT_WAIT_RESULT_SETTING, + TIMEOUT_WAIT_RESULT_SETTING, UV_INDEX_STRATEGY_SETTING, }, scripts::ScriptLang, stats_oss::schedule_stats, @@ -60,8 +60,8 @@ use windmill_common::{ MODE_AND_ADDONS, }, worker::{ - reload_custom_tags_setting, Connection, HUB_CACHE_DIR, HUB_RT_CACHE_DIR, TMP_DIR, - TMP_LOGS_DIR, WORKER_GROUP, + reload_custom_tags_setting, Connection, HUB_CACHE_DIR, HUB_RT_CACHE_DIR, NATIVE_MODE, + TMP_DIR, TMP_LOGS_DIR, WORKER_GROUP, }, KillpillSender, DEFAULT_HUB_BASE_URL, METRICS_ENABLED, }; @@ -99,12 +99,11 @@ use crate::monitor::{ reload_app_workspaced_route_setting, reload_base_url_setting, reload_bunfig_install_scopes_setting, reload_critical_alert_mute_ui_setting, reload_critical_error_channels_setting, reload_extra_pip_index_url_setting, - reload_job_isolation_setting, reload_hub_api_secret_setting, reload_hub_base_url_setting, - reload_job_default_timeout_setting, reload_jwt_secret_setting, reload_license_key, - reload_npm_config_registry_setting, - reload_otel_tracing_proxy_setting, reload_pip_index_url_setting, - reload_retention_period_setting, reload_scim_token_setting, reload_smtp_config, - reload_uv_index_strategy_setting, reload_worker_config, MonitorIteration, + reload_hub_api_secret_setting, reload_hub_base_url_setting, reload_job_default_timeout_setting, + reload_job_isolation_setting, reload_jwt_secret_setting, reload_license_key, + reload_npm_config_registry_setting, reload_otel_tracing_proxy_setting, + reload_pip_index_url_setting, reload_retention_period_setting, reload_scim_token_setting, + reload_smtp_config, reload_uv_index_strategy_setting, reload_worker_config, MonitorIteration, }; #[cfg(feature = "parquet")] @@ -613,6 +612,9 @@ async fn windmill_main() -> anyhow::Result<()> { #[allow(unused_mut)] let mut num_workers = if mode == Mode::Server || mode == Mode::Indexer || mode == Mode::MCP { 0 + } else if *NATIVE_MODE { + println!("NATIVE_MODE enabled: forcing NUM_WORKERS=8"); + 8 } else { std::env::var("NUM_WORKERS") .ok() @@ -620,10 +622,19 @@ async fn windmill_main() -> anyhow::Result<()> { .unwrap_or(DEFAULT_NUM_WORKERS as i32) }; - // TODO: maybe gate behind debug_assertions? 
- if num_workers > 1 && !std::env::var("WORKER_GROUP").is_ok_and(|x| x == "native") { + if num_workers > 1 && !*NATIVE_MODE && *WORKER_GROUP != "native" { + if !std::env::var("I_ACK_NUM_WORKERS_IS_UNSAFE").is_ok_and(|x| x == "1" || x == "true") { + eprintln!( + "ERROR: NUM_WORKERS > 1 is only safe for native workers. \ + Set NATIVE_MODE=true for native-only workers, or set \ + I_ACK_NUM_WORKERS_IS_UNSAFE=1 to bypass this check at your own risk." + ); + std::process::exit(1); + } println!( - "We STRONGLY recommend using at most 1 worker per container, use at your own risks" + "WARNING: Running with NUM_WORKERS={} without native mode. \ + This is not recommended. Use at your own risk.", + num_workers ); } diff --git a/backend/src/monitor.rs b/backend/src/monitor.rs index e8bbe5c08bbfd..f5a6d98383619 100644 --- a/backend/src/monitor.rs +++ b/backend/src/monitor.rs @@ -239,11 +239,13 @@ pub async fn initial_load( Connection::Http(_) => { // TODO: reload worker config from http let mut config = WORKER_CONFIG.write().await; + let worker_tags = DECODED_AGENT_TOKEN + .as_ref() + .map(|x| x.tags.clone()) + .unwrap_or_default(); + let native_mode = windmill_common::worker::is_native_mode(&worker_tags); *config = WorkerConfig { - worker_tags: DECODED_AGENT_TOKEN - .as_ref() - .map(|x| x.tags.clone()) - .unwrap_or_default(), + worker_tags, env_vars: load_env_vars( load_whitelist_env_vars_from_env(), &std::collections::HashMap::new(), @@ -257,6 +259,7 @@ pub async fn initial_load( cache_clear: None, additional_python_paths: None, pip_local_dependencies: None, + native_mode, }; } } diff --git a/backend/windmill-api-integration-tests/Cargo.toml b/backend/windmill-api-integration-tests/Cargo.toml index 1936a13fd97ae..c27a10775e36f 100644 --- a/backend/windmill-api-integration-tests/Cargo.toml +++ b/backend/windmill-api-integration-tests/Cargo.toml @@ -19,6 +19,8 @@ mcp = [] windmill-test-utils.workspace = true windmill-api-client.workspace = true windmill-common = { workspace = true, default-features = false } +windmill-native-triggers = { workspace = true, features = ["native_trigger"] } +windmill-api-auth.workspace = true sqlx.workspace = true serde_json.workspace = true serde.workspace = true diff --git a/backend/windmill-api-integration-tests/tests/native_triggers.rs b/backend/windmill-api-integration-tests/tests/native_triggers.rs new file mode 100644 index 0000000000000..a03542edefa0c --- /dev/null +++ b/backend/windmill-api-integration-tests/tests/native_triggers.rs @@ -0,0 +1,595 @@ +/*! + * Integration tests for the native trigger system (Google). + * + * Tests cover 4 business-logic areas: + * 1. Resource path change — cleanup old path, recreate at new path + * 2. Config loading — workspace-level, instance-level, token update + * 3. Channel expiration renewal — should_renew_channel pure logic + * 4. 
Delete workspace integration — full cascade, cleanup preserves triggers, parse_stop_channel_params + */ + +use serde_json::json; +use sqlx::{Pool, Postgres}; + +use windmill_api_auth::ApiAuthed; +use windmill_common::variables::{build_crypt, encrypt}; +use windmill_native_triggers::{ + decrypt_oauth_data, delete_native_trigger, delete_workspace_integration, + get_workspace_integration, + google::{parse_stop_channel_params, should_renew_channel}, + store_native_trigger, store_workspace_integration, NativeTriggerConfig, OAuthConfig, + ServiceName, +}; + +// ============================================================================ +// Helpers +// ============================================================================ + +async fn insert_test_script(db: &Pool<Postgres>, path: &str) -> anyhow::Result<i64> { + let hash: i64 = rand::random::<i64>().unsigned_abs() as i64; + sqlx::query( + "INSERT INTO script (workspace_id, hash, path, summary, description, content, + created_by, language, kind, lock) + VALUES ('test-workspace', $1, $2, '', '', 'def main(): pass', + 'test-user', 'python3', 'script', '')", + ) + .bind(hash) + .bind(path) + .execute(db) + .await?; + Ok(hash) +} + +fn test_authed() -> ApiAuthed { + ApiAuthed { + email: "test@windmill.dev".to_string(), + username: "test-user".to_string(), + is_admin: true, + is_operator: false, + groups: vec!["all".to_string()], + folders: vec![], + scopes: None, + username_override: None, + token_prefix: None, + } +} + +/// Set up a complete workspace integration with account+variable+resource. +/// Returns the id of the created account. +async fn setup_oauth_integration( + db: &Pool<Postgres>, + service_name: ServiceName, + resource_path: &str, + access_token: &str, + refresh_token: &str, + oauth_data_override: Option<serde_json::Value>, +) -> anyhow::Result<i32> { + // 1. Create account with is_workspace_integration=true + let account_id: i32 = sqlx::query_scalar!( + "INSERT INTO account (workspace_id, client, expires_at, refresh_token, is_workspace_integration) + VALUES ('test-workspace', $1, now() + interval '1 hour', $2, true) + RETURNING id", + service_name.as_str(), + refresh_token, + ) + .fetch_one(db) + .await?; + + // 2. Encrypt and create variable + let mc = build_crypt(db, "test-workspace").await?; + let encrypted = encrypt(&mc, access_token); + + sqlx::query!( + "INSERT INTO variable (workspace_id, path, value, is_secret, description, account, is_oauth) + VALUES ('test-workspace', $1, $2, true, 'test oauth token', $3, true)", + resource_path, + encrypted, + account_id, + ) + .execute(db) + .await?; + + // 3. Create resource + let resource_value = json!({ "token": format!("$var:{}", resource_path) }); + sqlx::query!( + "INSERT INTO resource (workspace_id, path, value, resource_type, extra_perms, created_by) + VALUES ('test-workspace', $1, $2, $3, '{}'::jsonb, 'test-user')", + resource_path, + resource_value, + service_name.resource_type(), + ) + .execute(db) + .await?; + + // 4.
Store workspace integration with resource_path + let oauth_data = oauth_data_override.unwrap_or_else(|| { + json!({ + "client_id": "test-client-id", + "client_secret": "test-client-secret", + "base_url": "https://example.com", + "resource_path": resource_path, + }) + }); + + let authed = test_authed(); + let mut tx = db.begin().await?; + store_workspace_integration( + &mut *tx, + &authed, + "test-workspace", + service_name, + oauth_data, + Some(resource_path), + ) + .await?; + tx.commit().await?; + + Ok(account_id) +} + +fn now_ms() -> i64 { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_millis() as i64 +} + +// ============================================================================ +// 1. Resource Path Change +// ============================================================================ + +#[sqlx::test(migrations = "../migrations", fixtures("base"))] +async fn test_resource_path_change(db: Pool<Postgres>) -> anyhow::Result<()> { + let path_a = "u/test-user/native_gworkspace"; + setup_oauth_integration( + &db, + ServiceName::Google, + path_a, + "token-a", + "refresh-a", + None, + ) + .await?; + + // Verify decrypt works at path A + let config: OAuthConfig = + decrypt_oauth_data(&db, &db, "test-workspace", ServiceName::Google).await?; + assert_eq!(config.access_token, "token-a"); + + // Cleanup old path + let mut tx = db.begin().await?; + windmill_native_triggers::workspace_integrations::cleanup_oauth_resource( + &mut *tx, + "test-workspace", + ServiceName::Google, + ) + .await; + tx.commit().await?; + + // Recreate at path B + let path_b = "u/test-user/native_gworkspace_v2"; + setup_oauth_integration( + &db, + ServiceName::Google, + path_b, + "token-b", + "refresh-b", + None, + ) + .await?; + + // Path A resources should be gone + let var_count: i64 = sqlx::query_scalar!( + "SELECT count(*) FROM variable WHERE workspace_id = 'test-workspace' AND path = $1", + path_a, + ) + .fetch_one(&db) + .await? + .unwrap_or(0); + assert_eq!(var_count, 0, "variable at old path should be deleted"); + + let res_count: i64 = sqlx::query_scalar!( + "SELECT count(*) FROM resource WHERE workspace_id = 'test-workspace' AND path = $1", + path_a, + ) + .fetch_one(&db) + .await? + .unwrap_or(0); + assert_eq!(res_count, 0, "resource at old path should be deleted"); + + // Path B should work + let config: OAuthConfig = + decrypt_oauth_data(&db, &db, "test-workspace", ServiceName::Google).await?; + assert_eq!(config.access_token, "token-b"); + assert_eq!(config.refresh_token.as_deref(), Some("refresh-b")); + + Ok(()) +} + +// ============================================================================ +// 2.
Config Loading — workspace vs instance + token update +// ============================================================================ + +#[sqlx::test(migrations = "../migrations", fixtures("base"))] +async fn test_decrypt_workspace_level(db: Pool<Postgres>) -> anyhow::Result<()> { + let resource_path = "u/test-user/native_gworkspace"; + setup_oauth_integration( + &db, + ServiceName::Google, + resource_path, + "ws-access-token", + "ws-refresh-token", + None, + ) + .await?; + + let config: OAuthConfig = + decrypt_oauth_data(&db, &db, "test-workspace", ServiceName::Google).await?; + + assert_eq!(config.access_token, "ws-access-token"); + assert_eq!(config.refresh_token.as_deref(), Some("ws-refresh-token")); + assert_eq!(config.client_id, "test-client-id"); + assert_eq!(config.client_secret, "test-client-secret"); + assert_eq!(config.base_url, "https://example.com"); + + Ok(()) +} + +#[sqlx::test(migrations = "../migrations", fixtures("base"))] +async fn test_decrypt_instance_level(db: Pool<Postgres>) -> anyhow::Result<()> { + // Insert instance-level credentials into global_settings + sqlx::query!( + "INSERT INTO global_settings (name, value) VALUES ('oauths', $1) + ON CONFLICT (name) DO UPDATE SET value = $1", + json!({ + "gworkspace": { + "id": "instance-client-id", + "secret": "instance-client-secret" + } + }), + ) + .execute(&db) + .await?; + + let resource_path = "u/test-user/native_gworkspace"; + let oauth_data = json!({ + "instance_shared": true, + "base_url": "https://accounts.google.com", + "resource_path": resource_path, + }); + + setup_oauth_integration( + &db, + ServiceName::Google, + resource_path, + "inst-access-token", + "inst-refresh-token", + Some(oauth_data), + ) + .await?; + + let config: OAuthConfig = + decrypt_oauth_data(&db, &db, "test-workspace", ServiceName::Google).await?; + + assert_eq!(config.client_id, "instance-client-id"); + assert_eq!(config.client_secret, "instance-client-secret"); + assert_eq!(config.access_token, "inst-access-token"); + + Ok(()) +} + +#[sqlx::test(migrations = "../migrations", fixtures("base"))] +async fn test_token_update_persists(db: Pool<Postgres>) -> anyhow::Result<()> { + let resource_path = "u/test-user/native_gworkspace"; + let account_id = setup_oauth_integration( + &db, + ServiceName::Google, + resource_path, + "old-access-token", + "old-refresh-token", + None, + ) + .await?; + + // Verify old tokens + let config: OAuthConfig = + decrypt_oauth_data(&db, &db, "test-workspace", ServiceName::Google).await?; + assert_eq!(config.access_token, "old-access-token"); + + // Simulate token refresh: update variable + account + let mc = build_crypt(&db, "test-workspace").await?; + let new_encrypted = encrypt(&mc, "new-access-token"); + sqlx::query!( + "UPDATE variable SET value = $1 WHERE workspace_id = 'test-workspace' AND path = $2", + new_encrypted, + resource_path, + ) + .execute(&db) + .await?; + + sqlx::query!( + "UPDATE account SET refresh_token = $1 WHERE workspace_id = 'test-workspace' AND id = $2", + "new-refresh-token", + account_id, + ) + .execute(&db) + .await?; + + // Verify new tokens + let config: OAuthConfig = + decrypt_oauth_data(&db, &db, "test-workspace", ServiceName::Google).await?; + assert_eq!(config.access_token, "new-access-token"); + assert_eq!(config.refresh_token.as_deref(), Some("new-refresh-token")); + + Ok(()) +}
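+
+// An illustrative sketch (not code from this PR): how the two config-loading
+// paths exercised above are expected to resolve the OAuth client credentials.
+// `resolve_client_creds` is a hypothetical helper that exists only for this
+// example; `get_instance_oauth_credentials` is the real helper this PR adds
+// to backend/windmill-common/src/global_settings.rs.
+#[allow(dead_code)]
+async fn resolve_client_creds(
+    db: &Pool<Postgres>,
+    oauth_data: &serde_json::Value,
+) -> windmill_common::error::Result<(String, String)> {
+    if oauth_data.get("instance_shared").and_then(|v| v.as_bool()) == Some(true) {
+        // Instance-level sharing: client id/secret come from the `oauths`
+        // global setting ("gworkspace" is the key seeded for Google above).
+        windmill_common::global_settings::get_instance_oauth_credentials(db, "gworkspace").await
+    } else {
+        // Workspace-level: client id/secret are stored directly in
+        // workspace_integrations.oauth_data.
+        let get = |k: &str| {
+            oauth_data.get(k).and_then(|v| v.as_str()).unwrap_or("").to_string()
+        };
+        Ok((get("client_id"), get("client_secret")))
+    }
+}
+
+// ============================================================================
+// 3.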
Channel Expiration Renewal — should_renew_channel +// ============================================================================ + +#[test] +fn test_should_renew_drive_channel_expired() { + let config = json!({ + "triggerType": "drive", + "expiration": (now_ms() - 1000).to_string(), + }); + assert!(should_renew_channel(&config)); +} + +#[test] +fn test_should_renew_drive_channel_within_window() { + // 30 minutes remaining — within the 1-hour Drive renewal window + let config = json!({ + "triggerType": "drive", + "expiration": (now_ms() + 30 * 60 * 1000).to_string(), + }); + assert!(should_renew_channel(&config)); +} + +#[test] +fn test_should_renew_drive_channel_not_yet() { + // 2 hours remaining — outside the 1-hour Drive renewal window + let config = json!({ + "triggerType": "drive", + "expiration": (now_ms() + 2 * 60 * 60 * 1000).to_string(), + }); + assert!(!should_renew_channel(&config)); +} + +#[test] +fn test_should_renew_calendar_channel_within_window() { + // 12 hours remaining — within the 1-day Calendar renewal window + let config = json!({ + "triggerType": "calendar", + "expiration": (now_ms() + 12 * 60 * 60 * 1000).to_string(), + }); + assert!(should_renew_channel(&config)); +} + +#[test] +fn test_should_renew_calendar_channel_not_yet() { + // 2 days remaining — outside the 1-day Calendar renewal window + let config = json!({ + "triggerType": "calendar", + "expiration": (now_ms() + 2 * 24 * 60 * 60 * 1000).to_string(), + }); + assert!(!should_renew_channel(&config)); +} + +#[test] +fn test_should_renew_channel_zero_expiration() { + let config = json!({ + "triggerType": "drive", + "expiration": "0", + }); + assert!(!should_renew_channel(&config)); +} + +#[test] +fn test_should_renew_channel_missing_fields() { + assert!(!should_renew_channel(&json!({}))); +}
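+
+// An illustrative sketch (not the implementation): a minimal model of the
+// renewal policy the tests above pin down — renew Drive channels within the
+// last hour before expiration, Calendar channels within the last day, and
+// never when `expiration` is "0" or fields are missing. The real
+// should_renew_channel lives in the crate's google module.
+#[allow(dead_code)]
+fn should_renew_channel_sketch(config: &serde_json::Value) -> bool {
+    let trigger_type = match config.get("triggerType").and_then(|v| v.as_str()) {
+        Some(t) => t,
+        None => return false, // missing fields: nothing to renew
+    };
+    let expiration_ms: i64 = match config
+        .get("expiration")
+        .and_then(|v| v.as_str())
+        .and_then(|s| s.parse().ok())
+    {
+        Some(e) if e > 0 => e,
+        _ => return false, // "0" or unparsable: no expiration to renew
+    };
+    let window_ms: i64 = match trigger_type {
+        "calendar" => 24 * 60 * 60 * 1000, // 1-day Calendar renewal window
+        _ => 60 * 60 * 1000,               // 1-hour Drive renewal window
+    };
+    expiration_ms - now_ms() <= window_ms
+}
+
+// ============================================================================
+// 4.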
Delete Workspace Integration +// ============================================================================ + +#[sqlx::test(migrations = "../migrations", fixtures("base"))] +async fn test_delete_integration_full_cascade(db: Pool) -> anyhow::Result<()> { + let resource_path = "u/test-user/native_gworkspace"; + let account_id = setup_oauth_integration( + &db, + ServiceName::Google, + resource_path, + "token", + "refresh", + None, + ) + .await?; + + // Add a native trigger linked to this integration + insert_test_script(&db, "f/test/handler").await?; + let trigger_config = NativeTriggerConfig { + script_path: "f/test/handler".to_string(), + is_flow: false, + webhook_token: "abcdefghij1234567890".to_string(), + }; + store_native_trigger( + &db, + "test-workspace", + ServiceName::Google, + "ext-1", + &trigger_config, + json!({"triggerType": "drive"}), + ) + .await?; + + // Step 1: Delete triggers + let deleted = + delete_native_trigger(&db, "test-workspace", ServiceName::Google, "ext-1").await?; + assert!(deleted); + + // Step 2: Cleanup OAuth resources + let mut tx = db.begin().await?; + windmill_native_triggers::workspace_integrations::cleanup_oauth_resource( + &mut *tx, + "test-workspace", + ServiceName::Google, + ) + .await; + tx.commit().await?; + + // Step 3: Delete workspace integration + let mut tx = db.begin().await?; + let deleted = + delete_workspace_integration(&mut *tx, "test-workspace", ServiceName::Google).await?; + tx.commit().await?; + assert!(deleted); + + // Verify everything is gone + let var_count: i64 = sqlx::query_scalar!( + "SELECT count(*) FROM variable WHERE workspace_id = 'test-workspace' AND path = $1", + resource_path, + ) + .fetch_one(&db) + .await? + .unwrap_or(0); + assert_eq!(var_count, 0); + + let acc_count: i64 = sqlx::query_scalar!( + "SELECT count(*) FROM account WHERE workspace_id = 'test-workspace' AND id = $1", + account_id, + ) + .fetch_one(&db) + .await? + .unwrap_or(0); + assert_eq!(acc_count, 0); + + let res_count: i64 = sqlx::query_scalar!( + "SELECT count(*) FROM resource WHERE workspace_id = 'test-workspace' AND path = $1", + resource_path, + ) + .fetch_one(&db) + .await? + .unwrap_or(0); + assert_eq!(res_count, 0); + + assert!( + get_workspace_integration(&db, "test-workspace", ServiceName::Google) + .await + .is_err() + ); + + Ok(()) +} + +#[sqlx::test(migrations = "../migrations", fixtures("base"))] +async fn test_cleanup_preserves_triggers(db: Pool) -> anyhow::Result<()> { + let resource_path = "u/test-user/native_gworkspace"; + setup_oauth_integration( + &db, + ServiceName::Google, + resource_path, + "token", + "refresh", + None, + ) + .await?; + + // Create a trigger + insert_test_script(&db, "f/test/handler").await?; + let trigger_config = NativeTriggerConfig { + script_path: "f/test/handler".to_string(), + is_flow: false, + webhook_token: "abcdefghij1234567890".to_string(), + }; + store_native_trigger( + &db, + "test-workspace", + ServiceName::Google, + "ext-1", + &trigger_config, + json!({"triggerType": "drive"}), + ) + .await?; + + // Cleanup OAuth only — should NOT remove the trigger + let mut tx = db.begin().await?; + windmill_native_triggers::workspace_integrations::cleanup_oauth_resource( + &mut *tx, + "test-workspace", + ServiceName::Google, + ) + .await; + tx.commit().await?; + + // OAuth resources gone + let var_count: i64 = sqlx::query_scalar!( + "SELECT count(*) FROM variable WHERE workspace_id = 'test-workspace' AND path = $1", + resource_path, + ) + .fetch_one(&db) + .await? 
+ .unwrap_or(0); + assert_eq!(var_count, 0); + + // Trigger still exists + let trigger_count: i64 = sqlx::query_scalar!( + "SELECT count(*) FROM native_trigger WHERE workspace_id = 'test-workspace' AND service_name = 'google'" + ) + .fetch_one(&db) + .await? + .unwrap_or(0); + assert_eq!(trigger_count, 1, "trigger should survive OAuth cleanup"); + + Ok(()) +} + +// --- parse_stop_channel_params --- + +#[test] +fn test_parse_stop_channel_params_drive() { + let config = json!({ + "triggerType": "drive", + "googleResourceId": "res-123", + }); + let (resource_id, url) = parse_stop_channel_params(&config); + assert_eq!(resource_id, "res-123"); + assert!( + url.contains("googleapis.com/drive/v3/channels/stop"), + "url={}", + url + ); +} + +#[test] +fn test_parse_stop_channel_params_calendar() { + let config = json!({ + "triggerType": "calendar", + "googleResourceId": "res-456", + }); + let (resource_id, url) = parse_stop_channel_params(&config); + assert_eq!(resource_id, "res-456"); + assert!( + url.contains("googleapis.com/calendar/v3/channels/stop"), + "url={}", + url + ); +} + +#[test] +fn test_parse_stop_channel_params_default() { + // Missing triggerType defaults to Drive + let config = json!({ "googleResourceId": "res-789" }); + let (resource_id, url) = parse_stop_channel_params(&config); + assert_eq!(resource_id, "res-789"); + assert!(url.contains("drive/v3/channels/stop"), "url={}", url); +} + +#[test] +fn test_parse_stop_channel_params_missing_resource_id() { + let config = json!({ "triggerType": "drive" }); + let (resource_id, _url) = parse_stop_channel_params(&config); + assert_eq!(resource_id, ""); +} diff --git a/backend/windmill-api-workers/src/lib.rs b/backend/windmill-api-workers/src/lib.rs index 93c9deae848d6..6cfa398ac44f2 100644 --- a/backend/windmill-api-workers/src/lib.rs +++ b/backend/windmill-api-workers/src/lib.rs @@ -24,7 +24,7 @@ use windmill_common::{ DB, }; -use windmill_api_auth::{ApiAuthed, require_super_admin}; +use windmill_api_auth::{require_super_admin, ApiAuthed}; pub fn global_service() -> Router { Router::new() @@ -76,6 +76,8 @@ struct WorkerPing { wm_memory_usage: Option, #[serde(skip_serializing_if = "Option::is_none")] job_isolation: Option, + #[serde(skip_serializing_if = "Option::is_none")] + native_mode: Option, } // #[derive(Serialize, Deserialize)] @@ -108,7 +110,7 @@ async fn list_worker_pings( WorkerPing, "SELECT worker, worker_instance, EXTRACT(EPOCH FROM (now() - ping_at))::integer as last_ping, started_at, ip, jobs_executed, CASE WHEN $4 IS TRUE THEN current_job_id ELSE NULL END as last_job_id, CASE WHEN $4 IS TRUE THEN current_job_workspace_id ELSE NULL END as last_job_workspace_id, - custom_tags, worker_group, wm_version, occupancy_rate, occupancy_rate_15s, occupancy_rate_5m, occupancy_rate_30m, memory, vcpus, memory_usage, wm_memory_usage, job_isolation + custom_tags, worker_group, wm_version, occupancy_rate, occupancy_rate_15s, occupancy_rate_5m, occupancy_rate_30m, memory, vcpus, memory_usage, wm_memory_usage, job_isolation, native_mode FROM worker_ping WHERE ($1::integer IS NULL AND ping_at > now() - interval '5 minute') OR (ping_at > now() - ($1 || ' seconds')::interval) ORDER BY ping_at desc LIMIT $2 OFFSET $3", diff --git a/backend/windmill-api-workspaces/src/workspaces.rs b/backend/windmill-api-workspaces/src/workspaces.rs index f3581b787abeb..c2a01631d0429 100644 --- a/backend/windmill-api-workspaces/src/workspaces.rs +++ b/backend/windmill-api-workspaces/src/workspaces.rs @@ -25,8 +25,8 @@ use regex::Regex; use hex; use sha2::{Digest, 
Sha256}; use std::collections::{HashMap, HashSet}; +use strum::IntoEnumIterator; use uuid::Uuid; -use strum::{IntoEnumIterator}; use windmill_audit::audit_oss::{audit_log, AuditAuthorable}; use windmill_audit::ActionKind; use windmill_common::db::UserDB; @@ -39,7 +39,9 @@ use windmill_common::workspaces::GitRepositorySettings; #[cfg(feature = "enterprise")] use windmill_common::workspaces::WorkspaceDeploymentUISettings; use windmill_common::workspaces::{ - check_user_against_rule, get_datatable_resource_from_db_unchecked, DataTable, DataTableCatalogResourceType, ProtectionRuleKind, ProtectionRules, ProtectionRuleset, RuleCheckResult, WorkspaceGitSyncSettings + check_user_against_rule, get_datatable_resource_from_db_unchecked, DataTable, + DataTableCatalogResourceType, ProtectionRuleKind, ProtectionRules, ProtectionRuleset, + RuleCheckResult, WorkspaceGitSyncSettings, }; use windmill_common::workspaces::{Ducklake, DucklakeCatalogResourceType}; use windmill_common::PgDatabase; @@ -2494,6 +2496,7 @@ struct UsedTriggers { pub gcp_used: bool, pub email_used: bool, pub nextcloud_used: bool, + pub google_used: bool, } async fn get_used_triggers( @@ -2515,7 +2518,8 @@ async fn get_used_triggers( EXISTS(SELECT 1 FROM sqs_trigger WHERE workspace_id = $1) AS "sqs_used!", EXISTS(SELECT 1 FROM gcp_trigger WHERE workspace_id = $1) AS "gcp_used!", EXISTS(SELECT 1 FROM email_trigger WHERE workspace_id = $1) AS "email_used!", - EXISTS(SELECT 1 FROM native_trigger WHERE workspace_id = $1 AND service_name = 'nextcloud'::native_trigger_service) AS "nextcloud_used!" + EXISTS(SELECT 1 FROM native_trigger WHERE workspace_id = $1 AND service_name = 'nextcloud'::native_trigger_service) AS "nextcloud_used!", + EXISTS(SELECT 1 FROM native_trigger WHERE workspace_id = $1 AND service_name = 'google'::native_trigger_service) AS "google_used!" 
"#, w_id ) @@ -4366,9 +4370,13 @@ async fn list_protection_rules( Extension(db): Extension, Path(w_id): Path, ) -> JsonResult> { - let rules = - (*windmill_common::workspaces::get_protection_rules(&w_id, &db).await?).clone(); - Ok(Json(rules.into_iter().map(ProtectionRulesetResponse::from).collect())) + let rules = (*windmill_common::workspaces::get_protection_rules(&w_id, &db).await?).clone(); + Ok(Json( + rules + .into_iter() + .map(ProtectionRulesetResponse::from) + .collect(), + )) } /// Create a new protection rule diff --git a/backend/windmill-api/openapi.yaml b/backend/windmill-api/openapi.yaml index 23aed31aaebca..80591e48e7b43 100644 --- a/backend/windmill-api/openapi.yaml +++ b/backend/windmill-api/openapi.yaml @@ -3625,6 +3625,8 @@ paths: type: boolean nextcloud_used: type: boolean + google_used: + type: boolean required: - http_routes_used - websocket_used @@ -3636,6 +3638,7 @@ paths: - sqs_used - email_used - nextcloud_used + - google_used /w/{workspace}/users/list: get: summary: list users @@ -12287,6 +12290,55 @@ paths: schema: type: string + /w/{workspace}/native_triggers/integrations/{service_name}/instance_sharing_available: + get: + summary: check if instance-level credential sharing is available for a service + operationId: checkInstanceSharingAvailable + tags: + - workspace_integration + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - name: service_name + in: path + required: true + schema: + $ref: "#/components/schemas/NativeServiceName" + responses: + "200": + description: whether instance sharing is available + content: + application/json: + schema: + type: boolean + + /w/{workspace}/native_triggers/integrations/{service_name}/generate_instance_connect_url: + post: + summary: generate connect url using instance-level credentials + operationId: generateInstanceConnectUrl + tags: + - workspace_integration + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - name: service_name + in: path + required: true + schema: + $ref: "#/components/schemas/NativeServiceName" + requestBody: + description: redirect_uri + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/RedirectUri" + responses: + "200": + description: authorization URL using instance credentials + content: + application/json: + schema: + type: string + /w/{workspace}/native_triggers/integrations/{service_name}/delete: delete: summary: delete native trigger service @@ -12308,7 +12360,7 @@ paths: schema: type: string - /w/{workspace}/native_triggers/integrations/{service_name}/callback/{code}/{state}: + /w/{workspace}/native_triggers/integrations/{service_name}/callback: post: summary: native trigger service oauth callback operationId: nativeTriggerServiceCallback @@ -12321,23 +12373,26 @@ paths: required: true schema: $ref: "#/components/schemas/NativeServiceName" - - name: code - in: path - required: true - schema: - type: string - - name: state - in: path - required: true - schema: - type: string requestBody: - description: redirect_uri + description: OAuth callback data required: true content: application/json: schema: - $ref: "#/components/schemas/RedirectUri" + type: object + properties: + code: + type: string + state: + type: string + redirect_uri: + type: string + resource_path: + type: string + required: + - code + - state + - redirect_uri responses: "200": description: native trigger service oauth completed @@ -12577,6 +12632,91 @@ paths: items: $ref: "#/components/schemas/NextCloudEventType" + /w/{workspace}/native_triggers/google/calendars: + 
get: + summary: list Google Calendars for the authenticated user + operationId: listGoogleCalendars + tags: + - native_trigger + parameters: + - name: workspace + in: path + required: true + schema: + type: string + responses: + "200": + description: list of Google Calendars + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/GoogleCalendarEntry" + + /w/{workspace}/native_triggers/google/drive/files: + get: + summary: list or search Google Drive files + operationId: listGoogleDriveFiles + tags: + - native_trigger + parameters: + - name: workspace + in: path + required: true + schema: + type: string + - name: q + in: query + description: search query to filter files by name + schema: + type: string + - name: parent_id + in: query + description: folder ID to list children of + schema: + type: string + - name: page_token + in: query + description: token for next page of results + schema: + type: string + - name: shared_with_me + in: query + description: if true, list files shared with the user + schema: + type: boolean + default: false + responses: + "200": + description: list of Google Drive files + content: + application/json: + schema: + $ref: "#/components/schemas/GoogleDriveFilesResponse" + + /w/{workspace}/native_triggers/google/drive/shared_drives: + get: + summary: list shared drives accessible to the user + operationId: listGoogleSharedDrives + tags: + - native_trigger + parameters: + - name: workspace + in: path + required: true + schema: + type: string + responses: + "200": + description: list of shared drives + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/SharedDriveEntry" + /native_triggers/{service_name}/w/{workspace_id}/webhook/{internal_id}: post: summary: receive webhook from external native trigger service @@ -19965,6 +20105,7 @@ components: - mqtt - sqs - gcp + - google TriggerMode: description: job trigger mode @@ -20416,6 +20557,8 @@ components: type: number nextcloud_count: type: number + google_count: + type: number WebsocketTrigger: allOf: @@ -21855,6 +21998,8 @@ components: type: number job_isolation: type: string + native_mode: + type: boolean required: - worker - worker_instance @@ -23367,6 +23512,7 @@ components: type: string enum: - nextcloud + - google NativeTrigger: type: object @@ -23449,6 +23595,10 @@ components: oauth_data: nullable: true $ref: "#/components/schemas/WorkspaceOAuthConfig" + resource_path: + type: string + nullable: true + description: Path to the resource storing the OAuth token required: - service_name @@ -23570,3 +23720,57 @@ components: - id - name - path + + GoogleCalendarEntry: + type: object + properties: + id: + type: string + summary: + type: string + primary: + type: boolean + default: false + required: + - id + - summary + + GoogleDriveFile: + type: object + properties: + id: + type: string + name: + type: string + mime_type: + type: string + is_folder: + type: boolean + default: false + required: + - id + - name + - mime_type + + GoogleDriveFilesResponse: + type: object + properties: + files: + type: array + items: + $ref: "#/components/schemas/GoogleDriveFile" + next_page_token: + type: string + required: + - files + + SharedDriveEntry: + type: object + properties: + id: + type: string + name: + type: string + required: + - id + - name diff --git a/backend/windmill-api/src/jobs.rs b/backend/windmill-api/src/jobs.rs index fbd9a9b6820cf..8bc597621ef7d 100644 --- a/backend/windmill-api/src/jobs.rs +++ b/backend/windmill-api/src/jobs.rs @@ -3413,29 
+3413,43 @@ pub async fn get_args_and_trigger_metadata( // Build trigger metadata if this is a native trigger request #[cfg(feature = "native_trigger")] - let trigger_metadata = if let Some(service_name_str) = &run_query.service_name { - use crate::native_triggers::ServiceName; + let (trigger_metadata, native_args) = if let Some(service_name_str) = &run_query.service_name { + use crate::native_triggers::{prepare_native_trigger_args, ServiceName}; let service_name = ServiceName::try_from(service_name_str.to_owned())?; - Some(TriggerMetadata::new( + let metadata = Some(TriggerMetadata::new( run_query.trigger_external_id.clone(), service_name.as_job_trigger_kind(), - )) + )); + let body = match &args.body { + crate::args::RawBody::Json(s) => s.clone(), + crate::args::RawBody::Text(s) => s.clone(), + _ => String::new(), + }; + let native = + prepare_native_trigger_args(service_name, db, w_id, &args.metadata.headers, body) + .await?; + (metadata, native) } else { - None + (None, None) }; #[cfg(not(feature = "native_trigger"))] let trigger_metadata: Option = None; + #[cfg(not(feature = "native_trigger"))] + let native_args: Option = None; - let args = args - .to_args_from_runnable( + let args = if let Some(prepared) = native_args { + prepared + } else { + args.to_args_from_runnable( &authed, &db, &w_id, runnable_id, run_query.skip_preprocessor, ) - .await?; + .await? + }; Ok((args, trigger_metadata)) } diff --git a/backend/windmill-api/src/triggers/handler.rs b/backend/windmill-api/src/triggers/handler.rs index 660ecf6051e82..78715e2661bbc 100644 --- a/backend/windmill-api/src/triggers/handler.rs +++ b/backend/windmill-api/src/triggers/handler.rs @@ -136,6 +136,7 @@ pub struct TriggersCount { sqs_count: i64, gcp_count: i64, nextcloud_count: i64, + google_count: i64, } pub async fn get_triggers_count_internal( @@ -305,6 +306,16 @@ pub async fn get_triggers_count_internal( .await? .unwrap_or(0); + let google_count = sqlx::query_scalar!( + "SELECT COUNT(*) FROM native_trigger WHERE workspace_id = $1 AND script_path = $2 AND is_flow = $3 AND service_name = 'google'", + w_id, + path, + is_flow, + ) + .fetch_one(db) + .await? + .unwrap_or(0); + Ok(axum::Json(TriggersCount { primary_schedule: primary_schedule .map(|s| windmill_trigger::handler::TriggerPrimarySchedule { schedule: s }), @@ -321,5 +332,6 @@ pub async fn get_triggers_count_internal( gcp_count, sqs_count, nextcloud_count, + google_count, })) } diff --git a/backend/windmill-api/src/users.rs b/backend/windmill-api/src/users.rs index aa1f73405b019..498e93e61acf7 100644 --- a/backend/windmill-api/src/users.rs +++ b/backend/windmill-api/src/users.rs @@ -335,6 +335,15 @@ async fn update_username_in_workpsace<'c>( ).execute(&mut **tx) .await?; + sqlx::query!( + r#"UPDATE workspace_integrations SET resource_path = REGEXP_REPLACE(resource_path, 'u/' || $2 || '/(.*)', 'u/' || $1 || '/\1') WHERE resource_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, + new_username, + old_username, + w_id + ) + .execute(&mut **tx) + .await?; + sqlx::query!( "UPDATE variable SET extra_perms = extra_perms - ('u/' || $2) || jsonb_build_object(('u/' || $1), extra_perms->('u/' || $2)) WHERE extra_perms ? 
('u/' || $2) AND workspace_id = $3", new_username, diff --git a/backend/windmill-common/src/global_settings.rs b/backend/windmill-common/src/global_settings.rs index cfa0839214886..77ad88e8a70d5 100644 --- a/backend/windmill-common/src/global_settings.rs +++ b/backend/windmill-common/src/global_settings.rs @@ -144,6 +144,43 @@ pub async fn load_value_from_global_settings( Ok(r) } +/// Read OAuth client_id and client_secret from instance-level global settings. +/// `oauth_key` is the key under `oauths` (e.g., "gworkspace", "nextcloud"). +pub async fn get_instance_oauth_credentials( + db: &Pool, + oauth_key: &str, +) -> error::Result<(String, String)> { + let oauths_value = load_value_from_global_settings(db, OAUTH_SETTING) + .await? + .ok_or_else(|| { + error::Error::InternalErr("Instance OAuth settings not found".to_string()) + })?; + + let entry = oauths_value.get(oauth_key).ok_or_else(|| { + error::Error::InternalErr(format!("No {} entry in instance OAuth settings", oauth_key)) + })?; + + let id = entry + .get("id") + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + let secret = entry + .get("secret") + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + + if id.is_empty() || secret.is_empty() { + return Err(error::Error::InternalErr(format!( + "Instance OAuth credentials for {} are incomplete", + oauth_key + ))); + } + + Ok((id, secret)) +} + pub async fn set_value_in_global_settings( db: &Pool, setting_name: &str, diff --git a/backend/windmill-common/src/worker.rs b/backend/windmill-common/src/worker.rs index 46f13d4444e07..f19dca9955829 100644 --- a/backend/windmill-common/src/worker.rs +++ b/backend/windmill-common/src/worker.rs @@ -156,6 +156,8 @@ lazy_static::lazy_static! { pub static ref NO_LOGS: bool = std::env::var("NO_LOGS").ok().is_some_and(|x| x == "1" || x == "true"); + pub static ref NATIVE_MODE: bool = std::env::var("NATIVE_MODE").ok().is_some_and(|x| x == "1" || x == "true"); + pub static ref CGROUP_V2_PATH_RE: Regex = Regex::new(r#"(?m)^0::(/.*)$"#).unwrap(); pub static ref CGROUP_V2_CPU_RE: Regex = Regex::new(r#"(?m)^(\d+) \S+$"#).unwrap(); pub static ref CGROUP_V1_INACTIVE_FILE_RE: Regex = Regex::new(r#"(?m)^total_inactive_file (\d+)$"#).unwrap(); @@ -227,6 +229,7 @@ lazy_static::lazy_static! { additional_python_paths: Default::default(), pip_local_dependencies: Default::default(), env_vars: Default::default(), + native_mode: false, })); pub static ref WORKER_PULL_QUERIES: Arc>> = Arc::new(RwLock::new(vec![])); @@ -273,6 +276,13 @@ lazy_static::lazy_static! { pub const ROOT_CACHE_NOMOUNT_DIR: &str = concatcp!(TMP_DIR, "/cache_nomount/"); +pub fn is_native_mode(worker_tags: &[String]) -> bool { + if *NATIVE_MODE || *WORKER_GROUP == "native" { + return true; + } + !worker_tags.is_empty() && worker_tags.iter().all(|t| NATIVE_TAGS.contains(t)) +} + pub static MIN_VERSION_IS_LATEST: AtomicBool = AtomicBool::new(false); #[derive(Clone)] pub struct HttpClient { @@ -1248,6 +1258,7 @@ pub struct Ping { pub occupancy_rate_5m: Option, pub occupancy_rate_30m: Option, pub job_isolation: Option, + pub native_mode: Option, pub ping_type: PingType, } pub async fn update_ping_http( @@ -1271,6 +1282,7 @@ pub async fn update_ping_http( insert_ping.occupancy_rate_15s, insert_ping.occupancy_rate_5m, insert_ping.occupancy_rate_30m, + insert_ping.native_mode.unwrap_or(false), db, ) .await? 
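For reference, a minimal usage sketch of the `is_native_mode` helper added to worker.rs above, summarizing the three ways a worker ends up in native mode. Assumption: `"nativets"` is a member of `NATIVE_TAGS`, whose contents are not shown in this diff.

```rust
use windmill_common::worker::is_native_mode;

fn main() {
    // A worker is native if (1) NATIVE_MODE=1 is set in the environment,
    // (2) WORKER_GROUP=native, or (3) its tag set is non-empty and made up
    // entirely of NATIVE_TAGS entries.
    let tags = vec!["nativets".to_string()]; // assumed member of NATIVE_TAGS
    println!("native mode: {}", is_native_mode(&tags));
}
```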
@@ -1297,6 +1309,7 @@ pub async fn update_ping_http( insert_ping.vcpus, insert_ping.memory, insert_ping.job_isolation, + insert_ping.native_mode.unwrap_or(false), db, ) .await?; @@ -1428,11 +1441,12 @@ pub async fn insert_ping_query( vcpus: Option, memory: Option, job_isolation: Option, + native_mode: bool, db: &DB, ) -> anyhow::Result<()> { sqlx::query!( - "INSERT INTO worker_ping (worker_instance, worker, ip, custom_tags, worker_group, dedicated_worker, dedicated_workers, wm_version, vcpus, memory, job_isolation) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) ON CONFLICT (worker) - DO UPDATE set ip = EXCLUDED.ip, custom_tags = EXCLUDED.custom_tags, worker_group = EXCLUDED.worker_group, dedicated_workers = EXCLUDED.dedicated_workers", + "INSERT INTO worker_ping (worker_instance, worker, ip, custom_tags, worker_group, dedicated_worker, dedicated_workers, wm_version, vcpus, memory, job_isolation, native_mode) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) ON CONFLICT (worker) + DO UPDATE set ip = EXCLUDED.ip, custom_tags = EXCLUDED.custom_tags, worker_group = EXCLUDED.worker_group, dedicated_workers = EXCLUDED.dedicated_workers, native_mode = EXCLUDED.native_mode", worker_instance, worker_name, ip, @@ -1443,7 +1457,8 @@ pub async fn insert_ping_query( version, vcpus, memory, - job_isolation.as_deref() + job_isolation.as_deref(), + native_mode, ) .execute(db) .await?; @@ -1534,12 +1549,13 @@ pub async fn update_worker_ping_main_loop_query( occupancy_rate_15s: Option, occupancy_rate_5m: Option, occupancy_rate_30m: Option, + native_mode: bool, db: &DB, ) -> anyhow::Result<()> { timeout(Duration::from_secs(10), sqlx::query!( "UPDATE worker_ping SET ping_at = now(), jobs_executed = $1, custom_tags = $2, occupancy_rate = $3, memory_usage = $4, wm_memory_usage = $5, vcpus = COALESCE($7, vcpus), - memory = COALESCE($8, memory), occupancy_rate_15s = $9, occupancy_rate_5m = $10, occupancy_rate_30m = $11 WHERE worker = $6", + memory = COALESCE($8, memory), occupancy_rate_15s = $9, occupancy_rate_5m = $10, occupancy_rate_30m = $11, native_mode = $12 WHERE worker = $6", jobs_executed, tags, occupancy_rate, @@ -1551,6 +1567,7 @@ pub async fn update_worker_ping_main_loop_query( occupancy_rate_15s, occupancy_rate_5m, occupancy_rate_30m, + native_mode, ) .execute(db)) .await??; @@ -1789,6 +1806,9 @@ pub async fn load_worker_config( } } + let native_mode = + *NATIVE_MODE || config.native_mode.unwrap_or(false) || is_native_mode(&worker_tags); + Ok(WorkerConfig { worker_tags, priority_tags_sorted, @@ -1808,6 +1828,7 @@ pub async fn load_worker_config( .additional_python_paths .or_else(|| load_additional_python_paths_from_env()), env_vars: resolved_env_vars, + native_mode, }) } @@ -1896,6 +1917,7 @@ pub struct WorkerConfigOpt { pub pip_local_dependencies: Option>, pub env_vars_static: Option>, pub env_vars_allowlist: Option>, + pub native_mode: Option, } impl Default for WorkerConfigOpt { @@ -1913,6 +1935,7 @@ impl Default for WorkerConfigOpt { pip_local_dependencies: Default::default(), env_vars_static: Default::default(), env_vars_allowlist: Default::default(), + native_mode: Default::default(), } } } @@ -1930,12 +1953,13 @@ pub struct WorkerConfig { pub additional_python_paths: Option>, pub pip_local_dependencies: Option>, pub env_vars: HashMap, + pub native_mode: bool, } impl std::fmt::Debug for WorkerConfig { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "WorkerConfig {{ worker_tags: {:?}, priority_tags_sorted: {:?}, dedicated_worker: {:?}, 
dedicated_workers: {:?}, init_bash: {:?}, periodic_script_bash: {:?}, periodic_script_interval_seconds: {:?}, cache_clear: {:?}, additional_python_paths: {:?}, pip_local_dependencies: {:?}, env_vars: {:?} }}", - self.worker_tags, self.priority_tags_sorted, self.dedicated_worker, self.dedicated_workers, self.init_bash, self.periodic_script_bash, self.periodic_script_interval_seconds, self.cache_clear, self.additional_python_paths, self.pip_local_dependencies, self.env_vars.iter().map(|(k, v)| format!("{}: {}{} ({} chars)", k, &v[..3.min(v.len())], "***", v.len())).collect::>().join(", ")) + write!(f, "WorkerConfig {{ worker_tags: {:?}, priority_tags_sorted: {:?}, dedicated_worker: {:?}, dedicated_workers: {:?}, init_bash: {:?}, periodic_script_bash: {:?}, periodic_script_interval_seconds: {:?}, cache_clear: {:?}, additional_python_paths: {:?}, pip_local_dependencies: {:?}, env_vars: {:?}, native_mode: {:?} }}", + self.worker_tags, self.priority_tags_sorted, self.dedicated_worker, self.dedicated_workers, self.init_bash, self.periodic_script_bash, self.periodic_script_interval_seconds, self.cache_clear, self.additional_python_paths, self.pip_local_dependencies, self.env_vars.iter().map(|(k, v)| format!("{}: {}{} ({} chars)", k, &v[..3.min(v.len())], "***", v.len())).collect::>().join(", "), self.native_mode) } } diff --git a/backend/windmill-native-triggers/src/google/external.rs b/backend/windmill-native-triggers/src/google/external.rs new file mode 100644 index 0000000000000..aa1aa3f400fb0 --- /dev/null +++ b/backend/windmill-native-triggers/src/google/external.rs @@ -0,0 +1,639 @@ +use async_trait::async_trait; +use reqwest::Method; +use serde_json::value::RawValue; +use sqlx::PgConnection; +use std::collections::HashMap; +use windmill_common::{ + error::{Error, Result}, + worker::to_raw_value, + BASE_URL, DB, +}; +use windmill_queue::PushArgsOwned; + +use crate::{ + generate_webhook_service_url, get_token_by_prefix, + sync::{SyncAction, SyncError, TriggerSyncInfo}, + update_native_trigger_error, update_native_trigger_service_config, External, NativeTrigger, + NativeTriggerData, ServiceName, +}; + +use super::{ + endpoints, routes, CreateWatchResponse, Google, GoogleOAuthData, GoogleServiceConfig, + GoogleTriggerData, GoogleTriggerType, StopChannelRequest, WatchRequest, +}; + +#[async_trait] +impl External for Google { + type ServiceConfig = GoogleServiceConfig; + type TriggerData = GoogleTriggerData; + type OAuthData = GoogleOAuthData; + type CreateResponse = CreateWatchResponse; + + const SERVICE_NAME: ServiceName = ServiceName::Google; + const DISPLAY_NAME: &'static str = "Google"; + const SUPPORT_WEBHOOK: bool = true; + const TOKEN_ENDPOINT: &'static str = "https://oauth2.googleapis.com/token"; + const REFRESH_ENDPOINT: &'static str = "https://oauth2.googleapis.com/token"; + const AUTH_ENDPOINT: &'static str = "https://accounts.google.com/o/oauth2/v2/auth"; + + async fn create( + &self, + w_id: &str, + _oauth_data: &Self::OAuthData, + webhook_token: &str, + data: &NativeTriggerData, + db: &DB, + tx: &mut PgConnection, + ) -> Result { + let base_url = &*BASE_URL.read().await; + + // Generate a unique channel ID + let channel_id = uuid::Uuid::new_v4().to_string(); + + // Generate the webhook URL (external_id is the channel_id) + let webhook_url = generate_webhook_service_url( + base_url, + w_id, + &data.script_path, + data.is_flow, + Some(&channel_id), + Self::SERVICE_NAME, + webhook_token, + ); + + tracing::info!( + "Creating Google {} watch channel '{}' with webhook URL: {}", + 
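+            // channel_id doubles as the trigger's external_id; baking it into the webhook
+            // URL before the watch call is what lets the Google create path skip the
+            // post-create update round-trip.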
data.service_config.trigger_type, + channel_id, + webhook_url + ); + + // Build the watch request with explicit expiration to avoid short Google defaults + let expiration_ms = chrono::Utc::now().timestamp_millis() + + (data.service_config.max_expiration_hours() as i64 * 3600 * 1000); + let mut watch_request = WatchRequest::new(channel_id.clone(), webhook_url); + watch_request.expiration = Some(expiration_ms); + + match data.service_config.trigger_type { + GoogleTriggerType::Drive => { + self.create_drive_watch(w_id, &data.service_config, &watch_request, db, tx) + .await + } + GoogleTriggerType::Calendar => { + self.create_calendar_watch(w_id, &data.service_config, &watch_request, db, tx) + .await + } + } + } + + async fn update( + &self, + w_id: &str, + oauth_data: &Self::OAuthData, + external_id: &str, + webhook_token: &str, + data: &NativeTriggerData, + db: &DB, + tx: &mut PgConnection, + ) -> Result { + // Google doesn't support updating watch channels — delete old, create new + let current = self.get(w_id, oauth_data, external_id, db, tx).await; + + // Try to stop the old channel (ignore errors if it doesn't exist) + if current.is_ok() { + let _ = self.delete(w_id, oauth_data, external_id, db, tx).await; + } + + let resp = self + .create(w_id, oauth_data, webhook_token, data, db, tx) + .await?; + + // Build config from request data + response metadata + self.service_config_from_create_response(data, &resp) + .ok_or_else(|| { + Error::InternalErr( + "Failed to build service_config from create response".to_string(), + ) + }) + } + + async fn get( + &self, + _w_id: &str, + _oauth_data: &Self::OAuthData, + external_id: &str, + _db: &DB, + tx: &mut PgConnection, + ) -> Result { + // Google doesn't have a "get channel" API + // We reconstruct the data from our stored service_config + let trigger = sqlx::query_as!( + crate::NativeTrigger, + r#" + SELECT + external_id, + workspace_id, + service_name AS "service_name!: ServiceName", + script_path, + is_flow, + webhook_token_prefix, + service_config, + error, + created_at, + updated_at + FROM native_trigger + WHERE external_id = $1 AND service_name = $2 + "#, + external_id, + ServiceName::Google as ServiceName + ) + .fetch_optional(&mut *tx) + .await? + .ok_or_else(|| Error::NotFound(format!("Trigger not found: {}", external_id)))?; + + let service_config: GoogleServiceConfig = trigger + .service_config + .map(|v| serde_json::from_value(v)) + .transpose()? 
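+            // transpose() turns Option<Result<_>> into Result<Option<_>>, so a stored but
+            // malformed config surfaces as an error instead of reading as absent.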
+ .ok_or_else(|| Error::InternalErr("Missing service config".to_string()))?; + + Ok(GoogleTriggerData { + trigger_type: service_config.trigger_type, + channel_id: external_id.to_string(), + google_resource_id: service_config + .google_resource_id + .clone() + .unwrap_or_default(), + expiration: service_config + .expiration + .as_deref() + .and_then(|s| s.parse::().ok()) + .unwrap_or(0), + // Drive fields + resource_id: service_config.resource_id, + resource_name: service_config.resource_name, + // Calendar fields + calendar_id: service_config.calendar_id, + calendar_name: service_config.calendar_name, + }) + } + + async fn delete( + &self, + w_id: &str, + _oauth_data: &Self::OAuthData, + external_id: &str, + db: &DB, + tx: &mut PgConnection, + ) -> Result<()> { + // Get the stored trigger to find the google_resource_id and trigger_type + let trigger = sqlx::query_scalar!( + r#" + SELECT service_config + FROM native_trigger + WHERE external_id = $1 AND service_name = $2 AND workspace_id = $3 + "#, + external_id, + ServiceName::Google as ServiceName, + w_id + ) + .fetch_optional(&mut *tx) + .await?; + + let config = trigger.flatten(); + if config.is_none() { + return Ok(()); + } + let config = config.unwrap(); + + let (google_resource_id, url) = super::parse_stop_channel_params(&config); + + if !google_resource_id.is_empty() { + let stop_request = + StopChannelRequest { id: external_id.to_string(), resource_id: google_resource_id }; + + // Stop the channel (ignore errors - channel may have already expired) + let result: std::result::Result = self + .http_client_request(&url, Method::POST, w_id, tx, db, None, Some(&stop_request)) + .await; + + if let Err(e) = result { + tracing::warn!("Failed to stop Google channel {}: {}", external_id, e); + } + } + + Ok(()) + } + + async fn exists( + &self, + w_id: &str, + _oauth_data: &Self::OAuthData, + external_id: &str, + _db: &DB, + tx: &mut PgConnection, + ) -> Result { + let exists = sqlx::query_scalar!( + r#" + SELECT EXISTS( + SELECT 1 FROM native_trigger + WHERE external_id = $1 AND service_name = $2 AND workspace_id = $3 + ) + "#, + external_id, + ServiceName::Google as ServiceName, + w_id + ) + .fetch_one(&mut *tx) + .await? 
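+        // sqlx surfaces SELECT EXISTS(...) as Option<bool>; treat a NULL result as absent.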
+ .unwrap_or(false); + + Ok(exists) + } + + async fn maintain_triggers( + &self, + db: &DB, + workspace_id: &str, + triggers: &[NativeTrigger], + _oauth_data: &Self::OAuthData, + synced: &mut Vec, + errors: &mut Vec, + ) { + renew_expiring_channels(self, db, workspace_id, triggers, synced, errors).await; + } + + async fn prepare_webhook( + &self, + _db: &DB, + _w_id: &str, + headers: HashMap, + body: String, + _script_path: &str, + _is_flow: bool, + ) -> Result { + // Google sends notification info in headers (same format for Drive and Calendar) + let payload = serde_json::json!({ + "channel_id": headers.get("x-goog-channel-id").cloned().unwrap_or_default(), + "resource_id": headers.get("x-goog-resource-id").cloned().unwrap_or_default(), + "resource_state": headers.get("x-goog-resource-state").cloned().unwrap_or_default(), + "resource_uri": headers.get("x-goog-resource-uri").cloned().unwrap_or_default(), + "message_number": headers.get("x-goog-message-number").cloned().unwrap_or_default(), + "channel_expiration": headers.get("x-goog-channel-expiration").cloned().unwrap_or_default(), + "changed": headers.get("x-goog-changed").cloned().unwrap_or_default(), + "body": if body.is_empty() { + serde_json::Value::Null + } else { + serde_json::from_str(&body).unwrap_or(serde_json::Value::Null) + } + }); + + let mut args: HashMap> = HashMap::new(); + args.insert("payload".to_string(), to_raw_value(&payload)); + + Ok(PushArgsOwned { extra: None, args }) + } + + fn external_id_and_metadata_from_response( + &self, + resp: &Self::CreateResponse, + ) -> (String, Option) { + let metadata = serde_json::json!({ + "googleResourceId": resp.resource_id, + "expiration": resp.expiration, + }); + (resp.id.clone(), Some(metadata)) + } + + fn service_config_from_create_response( + &self, + data: &NativeTriggerData, + resp: &Self::CreateResponse, + ) -> Option { + let mut config = data.service_config.clone(); + config.google_resource_id = Some(resp.resource_id.clone()); + config.expiration = Some(resp.expiration.clone()); + serde_json::to_value(&config).ok() + } + + fn additional_routes(&self) -> axum::Router { + routes::google_routes(self.clone()) + } +} + +// Helper methods for creating trigger type-specific watches +impl Google { + async fn create_drive_watch( + &self, + w_id: &str, + config: &GoogleServiceConfig, + watch_request: &WatchRequest, + db: &DB, + tx: &mut PgConnection, + ) -> Result { + match config.resource_id.as_deref().filter(|s| !s.is_empty()) { + Some(resource_id) => { + // Specific file: use files.watch + let url = format!("{}/files/{}/watch", endpoints::DRIVE_API_BASE, resource_id); + + self.http_client_request( + &url, + Method::POST, + w_id, + tx, + db, + None, + Some(watch_request), + ) + .await + } + None => { + // All changes: use changes.watch + let token_url = format!("{}/changes/startPageToken", endpoints::DRIVE_API_BASE); + let token_response: serde_json::Value = self + .http_client_request::<_, ()>(&token_url, Method::GET, w_id, tx, db, None, None) + .await?; + + let start_page_token = token_response + .get("startPageToken") + .and_then(|v| v.as_str()) + .ok_or_else(|| { + Error::InternalErr("Failed to get startPageToken".to_string()) + })?; + + let watch_body = serde_json::to_value(watch_request)?; + let watch_url = format!( + "{}/changes/watch?pageToken={}", + endpoints::DRIVE_API_BASE, + start_page_token + ); + + self.http_client_request( + &watch_url, + Method::POST, + w_id, + tx, + db, + None, + Some(&watch_body), + ) + .await + } + } + } + + async fn create_calendar_watch( + 
&self, + w_id: &str, + config: &GoogleServiceConfig, + watch_request: &WatchRequest, + db: &DB, + tx: &mut PgConnection, + ) -> Result { + let calendar_id = config.calendar_id.as_ref().ok_or_else(|| { + Error::BadRequest("calendar_id is required for Calendar triggers".into()) + })?; + + let url = format!( + "{}/calendars/{}/events/watch", + endpoints::CALENDAR_API_BASE, + urlencoding::encode(calendar_id) + ); + + self.http_client_request(&url, Method::POST, w_id, tx, db, None, Some(watch_request)) + .await + } + + /// Renew an expiring Google watch channel. + /// Stops the old channel and creates a new one with the same channel ID. + /// Returns the updated service_config with new expiration. + pub async fn renew_channel( + &self, + w_id: &str, + trigger: &NativeTrigger, + db: &DB, + ) -> Result { + let config: GoogleServiceConfig = trigger + .service_config + .as_ref() + .map(|v| serde_json::from_value(v.clone())) + .transpose()? + .ok_or_else(|| Error::InternalErr("Missing service config".to_string()))?; + + let webhook_token = get_token_by_prefix(db, &trigger.webhook_token_prefix) + .await? + .ok_or_else(|| Error::InternalErr("Webhook token not found".to_string()))?; + + let base_url = &*BASE_URL.read().await; + // Reuse the same channel ID so external_id stays permanent + let channel_id = trigger.external_id.clone(); + let webhook_url = generate_webhook_service_url( + base_url, + w_id, + &trigger.script_path, + trigger.is_flow, + Some(&channel_id), + ServiceName::Google, + &webhook_token, + ); + + tracing::info!( + "Renewing Google {} watch channel '{}' with webhook URL: {}", + config.trigger_type, + channel_id, + webhook_url + ); + + let expiration_ms = chrono::Utc::now().timestamp_millis() + + (config.max_expiration_hours() as i64 * 3600 * 1000); + let mut watch_request = WatchRequest::new(channel_id.clone(), webhook_url); + watch_request.expiration = Some(expiration_ms); + + // Best-effort stop old channel before creating a new one + let old_google_resource_id = trigger + .service_config + .as_ref() + .and_then(|c| c.get("googleResourceId")) + .and_then(|r| r.as_str()) + .unwrap_or_default(); + + if !old_google_resource_id.is_empty() { + let stop_request = StopChannelRequest { + id: channel_id.clone(), + resource_id: old_google_resource_id.to_string(), + }; + let url = match config.trigger_type { + GoogleTriggerType::Calendar => { + format!("{}/channels/stop", endpoints::CALENDAR_API_BASE) + } + GoogleTriggerType::Drive => { + format!("{}/channels/stop", endpoints::DRIVE_API_BASE) + } + }; + let mut tx = db.begin().await?; + let result: std::result::Result = self + .http_client_request( + &url, + Method::POST, + w_id, + &mut *tx, + db, + None, + Some(&stop_request), + ) + .await; + tx.commit().await?; + if let Err(e) = result { + tracing::warn!( + "Failed to stop old Google channel {} during renewal: {}", + channel_id, + e + ); + } + } + + // Create new watch channel with the same channel ID + let mut tx = db.begin().await?; + let resp = match config.trigger_type { + GoogleTriggerType::Drive => { + self.create_drive_watch(w_id, &config, &watch_request, db, &mut *tx) + .await? + } + GoogleTriggerType::Calendar => { + self.create_calendar_watch(w_id, &config, &watch_request, db, &mut *tx) + .await? 
+ } + }; + tx.commit().await?; + + // Build the updated service_config with new expiration + let mut new_config = config; + new_config.google_resource_id = Some(resp.resource_id); + new_config.expiration = Some(resp.expiration); + + serde_json::to_value(&new_config) + .map_err(|e| Error::internal_err(format!("Failed to serialize config: {}", e))) + } +} + +/// Renewal window: renew Drive channels with <1 hour remaining, Calendar with <1 day remaining. +pub fn should_renew_channel(service_config: &serde_json::Value) -> bool { + let expiration_ms = service_config + .get("expiration") + .and_then(|v| v.as_str()) + .and_then(|s| s.parse::().ok()) + .unwrap_or(0); + + if expiration_ms == 0 { + return false; + } + + let now_ms = chrono::Utc::now().timestamp_millis(); + let remaining_ms = expiration_ms - now_ms; + + let trigger_type = service_config + .get("triggerType") + .and_then(|t| t.as_str()) + .unwrap_or("drive"); + + let renewal_window_ms: i64 = match trigger_type { + "calendar" => 24 * 60 * 60 * 1000, // 1 day for Calendar (7 day expiry) + _ => 60 * 60 * 1000, // 1 hour for Drive (24h expiry) + }; + + remaining_ms < renewal_window_ms +} + +async fn renew_expiring_channels( + handler: &Google, + db: &DB, + workspace_id: &str, + triggers: &[NativeTrigger], + synced: &mut Vec, + errors: &mut Vec, +) { + for trigger in triggers { + let Some(config) = &trigger.service_config else { + continue; + }; + + if !should_renew_channel(config) { + continue; + } + + tracing::info!( + "Renewing expiring Google channel {} for script_path '{}' in workspace '{}'", + trigger.external_id, + trigger.script_path, + workspace_id + ); + + match handler.renew_channel(workspace_id, trigger, db).await { + Ok(new_config) => { + match update_native_trigger_service_config( + db, + workspace_id, + ServiceName::Google, + &trigger.external_id, + &new_config, + ) + .await + { + Ok(()) => { + tracing::info!( + "Renewed Google channel {} for '{}'", + trigger.external_id, + trigger.script_path + ); + synced.push(TriggerSyncInfo { + external_id: trigger.external_id.clone(), + script_path: trigger.script_path.clone(), + action: SyncAction::ConfigUpdated, + }); + } + Err(e) => { + tracing::error!( + "Failed to update DB after renewing Google channel {}: {}", + trigger.external_id, + e + ); + errors.push(SyncError { + resource_path: format!("workspace:{}", workspace_id), + error_message: format!( + "Failed to update DB after channel renewal for {}: {}", + trigger.external_id, e + ), + error_type: "channel_renewal_error".to_string(), + }); + } + } + } + Err(e) => { + tracing::error!( + "Failed to renew Google channel {} for '{}': {}", + trigger.external_id, + trigger.script_path, + e + ); + + let _ = update_native_trigger_error( + db, + workspace_id, + ServiceName::Google, + &trigger.external_id, + Some(&format!("Channel renewal failed: {}", e)), + ) + .await; + + errors.push(SyncError { + resource_path: format!("workspace:{}", workspace_id), + error_message: format!( + "Channel renewal failed for {}: {}", + trigger.external_id, e + ), + error_type: "channel_renewal_error".to_string(), + }); + } + } + } +} diff --git a/backend/windmill-native-triggers/src/google/mod.rs b/backend/windmill-native-triggers/src/google/mod.rs new file mode 100644 index 0000000000000..bc026a82e5c7d --- /dev/null +++ b/backend/windmill-native-triggers/src/google/mod.rs @@ -0,0 +1,278 @@ +//! Google Native Trigger Module +//! +//! This module provides integration with Google services (Drive, Calendar) +//! 
using Google's push notification system to trigger Windmill scripts/flows when changes occur.
+//!
+//! ## Unified Architecture
+//! A single "google" native trigger service handles both Drive and Calendar triggers.
+//! The `trigger_type` field in `GoogleServiceConfig` determines which service to use.
+//!
+//! ## How it works:
+//! 1. User configures a trigger with trigger_type (drive/calendar) and service-specific settings
+//! 2. Windmill creates a "watch channel" via the appropriate Google API
+//! 3. Google sends push notifications to Windmill's webhook when changes occur
+//! 4. The webhook triggers the configured script/flow
+//!
+//! ## Important notes:
+//! - Drive watch channels expire after max 24 hours
+//! - Calendar watch channels expire after max 7 days
+//! - Background sync job renews channels before expiration
+
+use serde::{Deserialize, Serialize};
+
+pub mod external;
+pub mod routes;
+
+pub use external::should_renew_channel;
+
+/// Extracts `(google_resource_id, stop_url)` from a native trigger's service_config JSON.
+/// Used by the `delete` method and tested independently.
+pub fn parse_stop_channel_params(config: &serde_json::Value) -> (String, String) {
+    let google_resource_id = config
+        .get("googleResourceId")
+        .and_then(|r| r.as_str())
+        .map(String::from)
+        .unwrap_or_default();
+
+    let trigger_type = config
+        .get("triggerType")
+        .and_then(|t| t.as_str())
+        .unwrap_or("drive");
+
+    let stop_url = match trigger_type {
+        "calendar" => format!("{}/channels/stop", endpoints::CALENDAR_API_BASE),
+        _ => format!("{}/channels/stop", endpoints::DRIVE_API_BASE),
+    };
+
+    (google_resource_id, stop_url)
+}
+
+/// Handler struct for Google triggers (stateless, used for routing)
+#[derive(Copy, Clone)]
+pub struct Google;
+
+/// Type of Google trigger
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum GoogleTriggerType {
+    Drive,
+    Calendar,
+}
+
+impl std::fmt::Display for GoogleTriggerType {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            GoogleTriggerType::Drive => write!(f, "drive"),
+            GoogleTriggerType::Calendar => write!(f, "calendar"),
+        }
+    }
+}
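+// Illustrative test sketch: pins down the contract of parse_stop_channel_params on the
+// camelCase service_config shape used throughout this module (field names assumed to
+// match GoogleServiceConfig's serde attributes).
+#[cfg(test)]
+mod stop_params_tests {
+    use super::*;
+
+    #[test]
+    fn calendar_config_yields_calendar_stop_url() {
+        let config = serde_json::json!({
+            "triggerType": "calendar",
+            "googleResourceId": "res-123",
+        });
+        let (resource_id, url) = parse_stop_channel_params(&config);
+        assert_eq!(resource_id, "res-123");
+        assert_eq!(url, format!("{}/channels/stop", endpoints::CALENDAR_API_BASE));
+    }
+
+    #[test]
+    fn missing_fields_fall_back_to_drive_and_empty_id() {
+        let (resource_id, url) = parse_stop_channel_params(&serde_json::json!({}));
+        assert!(resource_id.is_empty());
+        assert_eq!(url, format!("{}/channels/stop", endpoints::DRIVE_API_BASE));
+    }
+}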
+/// User-provided configuration for a Google trigger.
+/// The trigger_type determines which service-specific config is used.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GoogleServiceConfig {
+    /// The type of trigger (drive or calendar)
+    pub trigger_type: GoogleTriggerType,
+
+    // Drive-specific fields (only used when trigger_type = drive)
+    /// The file ID to watch, or None for all changes (Drive only)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_id: Option<String>,
+    /// Human-readable name/path for display purposes (Drive only)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_name: Option<String>,
+
+    // Calendar-specific fields (only used when trigger_type = calendar)
+    /// The calendar ID to watch (Calendar only, e.g., "primary")
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub calendar_id: Option<String>,
+    /// Human-readable calendar name (Calendar only)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub calendar_name: Option<String>,
+
+    // Metadata from Google watch channel (set after creation, used for renewal/deletion)
+    /// The resource ID assigned by Google for the watch channel
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub google_resource_id: Option<String>,
+    /// Channel expiration time (Unix timestamp in milliseconds, as string)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub expiration: Option<String>,
+}
+
+impl GoogleServiceConfig {
+    /// Returns the expiration duration for this trigger type in hours
+    pub fn max_expiration_hours(&self) -> u64 {
+        match self.trigger_type {
+            GoogleTriggerType::Drive => 24,     // Google Drive: max 24 hours
+            GoogleTriggerType::Calendar => 168, // Google Calendar: max 7 days
+        }
+    }
+}
+
+/// Data retrieved from Google about an active watch channel.
+/// Used for sync operations and stored in service_config.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GoogleTriggerData {
+    /// The type of trigger (drive or calendar)
+    pub trigger_type: GoogleTriggerType,
+    /// The channel ID (UUID generated by Windmill)
+    #[serde(skip_serializing)]
+    pub channel_id: String,
+    /// The resource ID assigned by Google
+    #[serde(skip_serializing)]
+    pub google_resource_id: String,
+    /// Channel expiration time (Unix timestamp in milliseconds)
+    pub expiration: i64,
+
+    // Drive-specific fields
+    /// The file ID being watched, or None for all changes (Drive only)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_id: Option<String>,
+    /// Human-readable name/path (Drive only)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_name: Option<String>,
+
+    // Calendar-specific fields
+    /// The calendar ID being watched (Calendar only)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub calendar_id: Option<String>,
+    /// Human-readable calendar name (Calendar only)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub calendar_name: Option<String>,
+}
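+// Illustrative sketch: shows the camelCase wire shape GoogleServiceConfig accepts and
+// the per-service expiration horizon it reports (values taken from the module docs).
+#[cfg(test)]
+mod service_config_tests {
+    use super::*;
+
+    #[test]
+    fn calendar_config_parses_and_caps_at_seven_days() {
+        let config: GoogleServiceConfig = serde_json::from_value(serde_json::json!({
+            "triggerType": "calendar",
+            "calendarId": "primary",
+        }))
+        .unwrap();
+        assert_eq!(config.max_expiration_hours(), 168);
+        // Metadata fields are only populated after the watch channel is created.
+        assert!(config.google_resource_id.is_none());
+    }
+}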
+/// OAuth data structure shared by all Google services.
+/// Stored encrypted in workspace_integrations table with service_name = 'google'.
+#[derive(Debug, Clone, Deserialize, Serialize)]
+pub struct GoogleOAuthData {
+    /// The OAuth access token for API requests
+    pub access_token: String,
+    /// The OAuth refresh token for obtaining new access tokens
+    pub refresh_token: Option<String>,
+    /// When the access token expires
+    pub token_expires_at: Option<chrono::DateTime<chrono::Utc>>,
+}
+
+/// Google API endpoints
+pub mod endpoints {
+    /// Google Drive API v3 base URL
+    pub const DRIVE_API_BASE: &str = "https://www.googleapis.com/drive/v3";
+    /// Google Calendar API v3 base URL
+    pub const CALENDAR_API_BASE: &str = "https://www.googleapis.com/calendar/v3";
+    /// Google OAuth2 token endpoint
+    pub const TOKEN_ENDPOINT: &str = "https://oauth2.googleapis.com/token";
+    /// Google OAuth2 authorization endpoint
+    pub const AUTH_ENDPOINT: &str = "https://accounts.google.com/o/oauth2/v2/auth";
+}
+
+/// OAuth scopes for Google services
+pub mod scopes {
+    /// Read-only access to Google Drive files
+    pub const DRIVE_READONLY: &str = "https://www.googleapis.com/auth/drive.readonly";
+    /// Full access to Google Drive
+    pub const DRIVE_FULL: &str = "https://www.googleapis.com/auth/drive";
+    /// Read-only access to Google Calendar
+    pub const CALENDAR_READONLY: &str = "https://www.googleapis.com/auth/calendar.readonly";
+    /// Full access to Google Calendar
+    pub const CALENDAR_FULL: &str = "https://www.googleapis.com/auth/calendar";
+    /// Events access to Google Calendar
+    pub const CALENDAR_EVENTS: &str = "https://www.googleapis.com/auth/calendar.events";
+
+    /// Returns all scopes needed for Google triggers (both Drive and Calendar)
+    pub fn all_scopes() -> Vec<&'static str> {
+        vec![DRIVE_READONLY, CALENDAR_READONLY, CALENDAR_EVENTS]
+    }
+}
+
+/// Common response wrapper for Google API errors
+#[derive(Debug, Deserialize)]
+pub struct GoogleApiError {
+    pub error: GoogleErrorDetails,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct GoogleErrorDetails {
+    pub code: i32,
+    pub message: String,
+    #[serde(default)]
+    pub errors: Vec<GoogleErrorItem>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct GoogleErrorItem {
+    pub domain: Option<String>,
+    pub reason: Option<String>,
+    pub message: Option<String>,
+}
+
+/// Google Watch Channel response (used by Drive and Calendar push notifications)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct WatchChannel {
+    /// Unique channel ID (we generate this as UUID)
+    pub id: String,
+    /// Resource ID assigned by Google
+    pub resource_id: String,
+    /// Resource URI being watched
+    pub resource_uri: Option<String>,
+    /// Channel expiration time (Unix timestamp in milliseconds)
+    pub expiration: i64,
+    /// Token for validation (optional)
+    pub token: Option<String>,
+}
+
+/// Response from Google API when creating a watch channel
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CreateWatchResponse {
+    /// The channel ID we provided
+    pub id: String,
+    /// Resource ID assigned by Google
+    pub resource_id: String,
+    /// Resource URI being watched
+    pub resource_uri: Option<String>,
+    /// Channel expiration (Unix timestamp in milliseconds)
+    pub expiration: String,
+}
+
+/// Request body for creating a watch channel
+#[derive(Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct WatchRequest {
+    /// Unique channel ID (UUID)
+    pub id: String,
+    /// Type of delivery mechanism (always "web_hook")
+    #[serde(rename = "type")]
+    pub channel_type: String,
+    /// The URL to receive notifications
+    pub address: String,
+    /// Optional token for validation
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub token: Option<String>,
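+    // Google clamps requested expirations to its per-API maximum (24h for Drive,
+    // 7 days for Calendar), so values derived from max_expiration_hours() are safe.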
+    /// Optional expiration time in milliseconds (Google may adjust this)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub expiration: Option<i64>,
+}
+
+impl WatchRequest {
+    pub fn new(channel_id: String, webhook_url: String) -> Self {
+        Self {
+            id: channel_id,
+            channel_type: "web_hook".to_string(),
+            address: webhook_url,
+            token: None,
+            expiration: None,
+        }
+    }
+}
+
+/// Request body for stopping a watch channel
+#[derive(Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct StopChannelRequest {
+    pub id: String,
+    pub resource_id: String,
+}
diff --git a/backend/windmill-native-triggers/src/google/routes.rs b/backend/windmill-native-triggers/src/google/routes.rs
new file mode 100644
index 0000000000000..44635afb2474e
--- /dev/null
+++ b/backend/windmill-native-triggers/src/google/routes.rs
@@ -0,0 +1,223 @@
+use std::sync::Arc;
+
+use axum::{
+    extract::{Path, Query},
+    routing::get,
+    Extension, Json, Router,
+};
+use http::Method;
+use serde::{Deserialize, Serialize};
+use windmill_common::{db::UserDB, error::JsonResult, DB};
+
+use crate::{get_workspace_integration, External, ServiceName};
+use windmill_api_auth::ApiAuthed;
+
+use super::Google;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct GoogleCalendarEntry {
+    pub id: String,
+    pub summary: String,
+    #[serde(default)]
+    pub primary: bool,
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct GoogleCalendarListResponse {
+    #[serde(default)]
+    items: Vec<GoogleCalendarListItem>,
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct GoogleCalendarListItem {
+    id: String,
+    #[serde(default)]
+    summary: String,
+    #[serde(default)]
+    primary: bool,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct GoogleDriveFile {
+    pub id: String,
+    pub name: String,
+    pub mime_type: String,
+    #[serde(default)]
+    pub is_folder: bool,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct GoogleDriveFilesResponse {
+    pub files: Vec<GoogleDriveFile>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_page_token: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct DriveApiResponse {
+    #[serde(default)]
+    files: Vec<DriveApiFile>,
+    next_page_token: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct DriveApiFile {
+    id: String,
+    name: String,
+    mime_type: String,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct DriveFilesQuery {
+    pub q: Option<String>,
+    pub parent_id: Option<String>,
+    pub page_token: Option<String>,
+    #[serde(default)]
+    pub shared_with_me: bool,
+}
+
+async fn list_calendars(
+    authed: ApiAuthed,
+    Extension(handler): Extension<Arc<Google>>,
+    Extension(db): Extension<DB>,
+    Extension(user_db): Extension<UserDB>,
+    Path(workspace_id): Path<String>,
+) -> JsonResult<Vec<GoogleCalendarEntry>> {
+    let mut tx = user_db.begin(&authed).await?;
+    get_workspace_integration(&mut *tx, &workspace_id, ServiceName::Google).await?;
+
+    let url = format!(
+        "{}/users/me/calendarList",
+        super::endpoints::CALENDAR_API_BASE
+    );
+
+    let response: GoogleCalendarListResponse = handler
+        .http_client_request::<_, ()>(&url, Method::GET, &workspace_id, &mut *tx, &db, None, None)
+        .await?;
+    tx.commit().await?;
+
+    let calendars = response
+        .items
+        .into_iter()
+        .map(|item| GoogleCalendarEntry {
+            id: item.id,
+            summary: item.summary,
+            primary: item.primary,
+        })
+        .collect();
+
+    Ok(Json(calendars))
+}
+
+async fn list_drive_files(
+    authed: ApiAuthed,
+    Extension(handler): Extension<Arc<Google>>,
+    Extension(db): Extension<DB>,
+    Extension(user_db): Extension<UserDB>,
+    Path(workspace_id): Path<String>,
+    Query(query): Query<DriveFilesQuery>,
+) ->
JsonResult { + let mut tx = user_db.begin(&authed).await?; + get_workspace_integration(&mut *tx, &workspace_id, ServiceName::Google).await?; + + let drive_query = if query.shared_with_me { + "sharedWithMe = true and trashed = false".to_string() + } else if let Some(ref parent_id) = query.parent_id { + format!("'{}' in parents and trashed = false", parent_id) + } else if let Some(ref search) = query.q { + format!("name contains '{}' and trashed = false", search) + } else { + "'root' in parents and trashed = false".to_string() + }; + + let mut url = format!( + "{}/files?q={}&fields=files(id,name,mimeType),nextPageToken&pageSize=50&orderBy=folder,name&supportsAllDrives=true&includeItemsFromAllDrives=true", + super::endpoints::DRIVE_API_BASE, + urlencoding::encode(&drive_query) + ); + + if let Some(ref page_token) = query.page_token { + url.push_str(&format!("&pageToken={}", urlencoding::encode(page_token))); + } + + let response: DriveApiResponse = handler + .http_client_request::<_, ()>(&url, Method::GET, &workspace_id, &mut *tx, &db, None, None) + .await?; + tx.commit().await?; + + let files = response + .files + .into_iter() + .map(|f| { + let is_folder = f.mime_type == "application/vnd.google-apps.folder"; + GoogleDriveFile { id: f.id, name: f.name, mime_type: f.mime_type, is_folder } + }) + .collect(); + + Ok(Json(GoogleDriveFilesResponse { + files, + next_page_token: response.next_page_token, + })) +} + +#[derive(Debug, Serialize)] +pub struct SharedDriveEntry { + pub id: String, + pub name: String, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +struct SharedDrivesApiResponse { + #[serde(default)] + drives: Vec, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +struct SharedDriveApiEntry { + id: String, + name: String, +} + +async fn list_shared_drives( + authed: ApiAuthed, + Extension(handler): Extension>, + Extension(db): Extension, + Extension(user_db): Extension, + Path(workspace_id): Path, +) -> JsonResult> { + let mut tx = user_db.begin(&authed).await?; + get_workspace_integration(&mut *tx, &workspace_id, ServiceName::Google).await?; + + let url = format!( + "{}/drives?pageSize=100&fields=drives(id,name)", + super::endpoints::DRIVE_API_BASE + ); + + let response: SharedDrivesApiResponse = handler + .http_client_request::<_, ()>(&url, Method::GET, &workspace_id, &mut *tx, &db, None, None) + .await?; + tx.commit().await?; + + let drives = response + .drives + .into_iter() + .map(|d| SharedDriveEntry { id: d.id, name: d.name }) + .collect(); + + Ok(Json(drives)) +} + +pub fn google_routes(service: Google) -> Router { + let service = Arc::new(service); + Router::new() + .route("/calendars", get(list_calendars)) + .route("/drive/files", get(list_drive_files)) + .route("/drive/shared_drives", get(list_shared_drives)) + .layer(Extension(service)) +} diff --git a/backend/windmill-native-triggers/src/handler.rs b/backend/windmill-native-triggers/src/handler.rs index ef054d46648c8..4246d7b3266a3 100644 --- a/backend/windmill-native-triggers/src/handler.rs +++ b/backend/windmill-native-triggers/src/handler.rs @@ -1,8 +1,7 @@ use crate::{ - delete_native_trigger, delete_token_by_prefix, get_native_trigger, get_token_by_prefix, - get_workspace_integration, list_native_triggers, store_native_trigger, - update_native_trigger_error, External, NativeTrigger, NativeTriggerConfig, NativeTriggerData, - ServiceName, + decrypt_oauth_data, delete_native_trigger, delete_token_by_prefix, get_native_trigger, + get_token_by_prefix, list_native_triggers, 
store_native_trigger, update_native_trigger_error, + External, NativeTrigger, NativeTriggerConfig, NativeTriggerData, ServiceName, }; use axum::{ extract::{Path, Query}, @@ -132,15 +131,9 @@ async fn create_native_trigger( ) .await?; - let integration = get_workspace_integration(&mut *tx, &workspace_id, service_name).await?; - - let oauth_data: T::OAuthData = serde_json::from_value(integration.oauth_data).map_err(|e| { - Error::InternalErr(format!( - "Failed to parse {} OAuth data: {}", - T::DISPLAY_NAME, - e - )) - })?; + let integration_service = service_name.integration_service(); + let oauth_data: T::OAuthData = + decrypt_oauth_data(&mut *tx, &db, &workspace_id, integration_service).await?; let resp = handler .create( @@ -155,24 +148,25 @@ async fn create_native_trigger( let (external_id, _) = handler.external_id_and_metadata_from_response(&resp); - // update the created external trigger with a new uri containing the external_id - handler - .update( - &workspace_id, - &oauth_data, - &external_id, - &webhook_token, - &data, - &db, - &mut tx, - ) - .await?; - - // Fetch the updated trigger data from the external service and extract service_config - let trigger_data = handler - .get(&workspace_id, &oauth_data, &external_id, &db, &mut tx) - .await?; - let service_config = handler.extract_service_config_from_trigger_data(&trigger_data)?; + // Some services (e.g. Google) can build service_config directly from the create response, + // while others (e.g. Nextcloud) need an update+get cycle to correct the webhook URL + // with the external_id assigned by the remote service. + let service_config = + if let Some(config) = handler.service_config_from_create_response(&data, &resp) { + config + } else { + handler + .update( + &workspace_id, + &oauth_data, + &external_id, + &webhook_token, + &data, + &db, + &mut tx, + ) + .await? 
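+                // update() returns the resolved service_config directly under the new trait
+                // contract, so the old follow-up get() + extract step is no longer needed.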
+ }; let config = NativeTriggerConfig { script_path: data.script_path.clone(), @@ -255,17 +249,11 @@ async fn update_native_trigger_handler( } }; - let integration = get_workspace_integration(&mut *tx, &workspace_id, service_name).await?; + let integration_service = service_name.integration_service(); + let oauth_data: T::OAuthData = + decrypt_oauth_data(&mut *tx, &db, &workspace_id, integration_service).await?; - let oauth_data: T::OAuthData = serde_json::from_value(integration.oauth_data).map_err(|e| { - Error::InternalErr(format!( - "Failed to parse {} OAuth data: {}", - T::DISPLAY_NAME, - e - )) - })?; - - handler + let service_config = handler .update( &workspace_id, &oauth_data, @@ -277,12 +265,6 @@ async fn update_native_trigger_handler( ) .await?; - // Fetch the updated trigger data from the external service and extract service_config - let trigger_data = handler - .get(&workspace_id, &oauth_data, &external_id, &db, &mut tx) - .await?; - let service_config = handler.extract_service_config_from_trigger_data(&trigger_data)?; - let config = NativeTriggerConfig { script_path: data.script_path.clone(), is_flow: data.is_flow, @@ -341,15 +323,9 @@ async fn get_native_trigger_handler( ) .await?; - let integration = get_workspace_integration(&mut *tx, &workspace_id, service_name).await?; - - let oauth_data: T::OAuthData = serde_json::from_value(integration.oauth_data).map_err(|e| { - Error::InternalErr(format!( - "Failed to parse {} OAuth data: {}", - T::DISPLAY_NAME, - e - )) - })?; + let integration_service = service_name.integration_service(); + let oauth_data: T::OAuthData = + decrypt_oauth_data(&mut *tx, &db, &workspace_id, integration_service).await?; let native_trigger = handler .get(&workspace_id, &oauth_data, &external_id, &db, &mut tx) @@ -430,15 +406,9 @@ async fn delete_native_trigger_handler( ) .await?; - let integration = get_workspace_integration(&mut *tx, &workspace_id, service_name).await?; - - let oauth_data: T::OAuthData = serde_json::from_value(integration.oauth_data).map_err(|e| { - Error::InternalErr(format!( - "Failed to parse {} OAuth data: {}", - T::DISPLAY_NAME, - e - )) - })?; + let integration_service = service_name.integration_service(); + let oauth_data: T::OAuthData = + decrypt_oauth_data(&mut *tx, &db, &workspace_id, integration_service).await?; handler .delete(&workspace_id, &oauth_data, &external_id, &db, &mut tx) @@ -562,15 +532,12 @@ pub fn generate_native_trigger_routers() -> Router { #[cfg(feature = "native_trigger")] { + use crate::google::Google; use crate::nextcloud::NextCloud; - // Register all service routes here - // When adding a new service: - // 1. Import the handler: use crate::newservice::NewServiceHandler; - // 2. 
Add the route: .nest("/newservice", service_routes(NewServiceHandler)) - return router.nest("/nextcloud", service_routes(NextCloud)); - // Add new services here: - // .nest("/newservice", service_routes(NewServiceHandler)) + return router + .nest("/nextcloud", service_routes(NextCloud)) + .nest("/google", service_routes(Google)); } #[cfg(not(feature = "native_trigger"))] diff --git a/backend/windmill-native-triggers/src/lib.rs b/backend/windmill-native-triggers/src/lib.rs index 456580b3effc1..51fd392a9bd0f 100644 --- a/backend/windmill-native-triggers/src/lib.rs +++ b/backend/windmill-native-triggers/src/lib.rs @@ -62,9 +62,9 @@ pub mod workspace_integrations; // Service modules - add new services here: #[cfg(feature = "native_trigger")] +pub mod google; +#[cfg(feature = "native_trigger")] pub mod nextcloud; -// #[cfg(feature = "native_trigger")] -// pub mod newservice; /// Enum of all supported native trigger services. /// When adding a new service, add a variant here (e.g., `NewService`). @@ -73,17 +73,15 @@ pub mod nextcloud; #[serde(rename_all = "lowercase")] pub enum ServiceName { Nextcloud, - // Add new services here: - // NewService, + Google, } impl TryFrom for ServiceName { type Error = Error; fn try_from(value: String) -> std::result::Result { - // Add new service match arms here: let service = match value.as_str() { "nextcloud" => ServiceName::Nextcloud, - // "newservice" => ServiceName::NewService, + "google" => ServiceName::Google, _ => { return Err(anyhow::anyhow!( "Unknown service, currently supported services are: [{}]", @@ -99,49 +97,73 @@ impl TryFrom for ServiceName { impl ServiceName { /// Returns the lowercase string identifier for this service. - /// Add new service match arms here. pub fn as_str(&self) -> &'static str { match self { ServiceName::Nextcloud => "nextcloud", - // ServiceName::NewService => "newservice", + ServiceName::Google => "google", } } /// Returns the corresponding TriggerKind for this service. - /// Requires adding the variant to TriggerKind in windmill_common. pub fn as_trigger_kind(&self) -> TriggerKind { match self { ServiceName::Nextcloud => TriggerKind::Nextcloud, - // ServiceName::NewService => TriggerKind::NewService, + ServiceName::Google => TriggerKind::Google, } } /// Returns the corresponding JobTriggerKind for this service. - /// Requires adding the variant to JobTriggerKind in windmill_common. pub fn as_job_trigger_kind(&self) -> windmill_common::jobs::JobTriggerKind { match self { ServiceName::Nextcloud => windmill_common::jobs::JobTriggerKind::Nextcloud, - // ServiceName::NewService => windmill_common::jobs::JobTriggerKind::NewService, + ServiceName::Google => windmill_common::jobs::JobTriggerKind::Google, } } /// Returns the OAuth token endpoint path for this service. - /// Used for building OAuth clients dynamically. pub fn token_endpoint(&self) -> &'static str { match self { ServiceName::Nextcloud => "/apps/oauth2/api/v1/token", - // ServiceName::NewService => "/oauth/token", + ServiceName::Google => "https://oauth2.googleapis.com/token", } } /// Returns the OAuth authorization endpoint path for this service. - /// Used for building OAuth authorization URLs. pub fn auth_endpoint(&self) -> &'static str { match self { ServiceName::Nextcloud => "/apps/oauth2/authorize", - // ServiceName::NewService => "/oauth/authorize", + ServiceName::Google => "https://accounts.google.com/o/oauth2/v2/auth", + } + } + + /// Returns the OAuth scopes for this service's authorization flow. 
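+    /// Space-separated, as expected by the `scope` query parameter of the authorize URL.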
+ pub fn oauth_scopes(&self) -> &'static str { + match self { + ServiceName::Nextcloud => "read write", + ServiceName::Google => "https://www.googleapis.com/auth/drive.readonly https://www.googleapis.com/auth/calendar.readonly https://www.googleapis.com/auth/calendar.events", + } + } + + /// Returns the resource type used for storing OAuth tokens. + pub fn resource_type(&self) -> &'static str { + match self { + ServiceName::Nextcloud => "nextcloud", + ServiceName::Google => "gworkspace", + } + } + + /// Returns extra OAuth authorization parameters required by this service. + pub fn extra_auth_params(&self) -> &[(&'static str, &'static str)] { + match self { + ServiceName::Google => &[("access_type", "offline"), ("prompt", "consent")], + ServiceName::Nextcloud => &[], } } + + /// Returns the integration service name for workspace_integrations lookup. + pub fn integration_service(&self) -> ServiceName { + *self + } } impl std::fmt::Display for ServiceName { @@ -150,6 +172,16 @@ impl std::fmt::Display for ServiceName { } } +/// Resolves an endpoint URL. If the endpoint is already an absolute URL (starts with http), +/// returns it as-is. Otherwise, prepends the base_url. +pub fn resolve_endpoint(base_url: &str, endpoint: &str) -> String { + if endpoint.starts_with("http://") || endpoint.starts_with("https://") { + endpoint.to_string() + } else { + format!("{}{}", base_url, endpoint) + } +} + #[derive(Debug, Clone, FromRow, Serialize, Deserialize)] pub struct NativeTrigger { pub external_id: String, @@ -183,6 +215,7 @@ pub struct WorkspaceIntegration { pub workspace_id: String, pub service_name: ServiceName, pub oauth_data: serde_json::Value, + pub resource_path: Option, pub created_at: DateTime, pub updated_at: DateTime, pub created_by: String, @@ -200,6 +233,7 @@ pub trait External: Send + Sync + 'static { const DISPLAY_NAME: &'static str; const TOKEN_ENDPOINT: &'static str; const REFRESH_ENDPOINT: &'static str; + const AUTH_ENDPOINT: &'static str; async fn create( &self, @@ -211,6 +245,10 @@ pub trait External: Send + Sync + 'static { tx: &mut PgConnection, ) -> Result; + /// Update a trigger on the external service and return the resolved service_config to store. + /// Each service is responsible for resolving the final config: + /// - Services that re-create the resource (e.g. Google) build config from request data + response metadata. + /// - Services that modify in-place (e.g. Nextcloud) fetch back the updated state and extract config. async fn update( &self, w_id: &str, @@ -220,7 +258,7 @@ pub trait External: Send + Sync + 'static { data: &NativeTriggerData, db: &DB, tx: &mut PgConnection, - ) -> Result<()>; + ) -> Result; async fn get( &self, @@ -250,13 +288,19 @@ pub trait External: Send + Sync + 'static { tx: &mut PgConnection, ) -> Result; - async fn list_all( + /// Periodic background maintenance for triggers in a workspace. 
+ /// Each service implements its own logic: + /// - Nextcloud: lists external triggers and reconciles with DB state + /// - Google: renews expiring watch channels + async fn maintain_triggers( &self, - w_id: &str, - oauth_data: &Self::OAuthData, db: &DB, - tx: &mut PgConnection, - ) -> Result>; + workspace_id: &str, + triggers: &[NativeTrigger], + oauth_data: &Self::OAuthData, + synced: &mut Vec, + errors: &mut Vec, + ); async fn prepare_webhook( &self, @@ -275,19 +319,18 @@ pub trait External: Send + Sync + 'static { resp: &Self::CreateResponse, ) -> (String, Option); - fn get_external_id_from_trigger_data(&self, data: &Self::TriggerData) -> String; - - /// Extracts the service-specific config from trigger data (from external service). - /// Used for comparison during sync to detect config drift. - /// Default implementation converts the trigger data to a JSON value - /// If you need to exclude some fields, skip serializing attributes on the TriggerData struct or override this method. - fn extract_service_config_from_trigger_data( + /// Build the service_config directly from the create response and input data, + /// skipping the update+get cycle after creation. + /// Return `None` (default) to use the update+get pattern (e.g. Nextcloud needs to + /// correct the webhook URL with the external_id assigned by the remote service). + /// Return `Some(config)` to skip update+get entirely (e.g. Google already includes + /// the channel_id in the webhook URL from the start). + fn service_config_from_create_response( &self, - data: &Self::TriggerData, - ) -> Result { - serde_json::to_value(data).map_err(|e| { - Error::internal_err(format!("Failed to convert trigger data to JSON: {}", e)) - }) + _data: &NativeTriggerData, + _resp: &Self::CreateResponse, + ) -> Option { + None } fn additional_routes(&self) -> axum::Router { @@ -327,19 +370,26 @@ pub trait External: Send + Sync + 'static { err.status().unwrap() ); - let refreshed_oauth_config = - refresh_oauth_tokens(&oauth_config, Self::REFRESH_ENDPOINT).await?; + let refreshed_oauth_config = refresh_oauth_tokens( + &oauth_config, + Self::REFRESH_ENDPOINT, + Self::AUTH_ENDPOINT, + ) + .await?; task::spawn({ let db_clone = db.clone(); let workspace_id_clone = workspace_id.to_string(); - let refreshed_json = oauth_config_to_json(&refreshed_oauth_config); + let service_name = Self::SERVICE_NAME; + let new_access_token = refreshed_oauth_config.access_token.clone(); + let new_refresh_token = refreshed_oauth_config.refresh_token.clone(); async move { - update_workspace_integration_tokens_helper( - db_clone, - workspace_id_clone, - Self::SERVICE_NAME, - refreshed_json, + update_oauth_token_resource( + &db_clone, + &workspace_id_clone, + service_name, + &new_access_token, + new_refresh_token.as_deref(), ) .await; } @@ -376,7 +426,7 @@ pub async fn make_http_request( headers: Option>, body: Option<&B>, access_token: &str, -) -> std::result::Result { +) -> std::result::Result { let client = Client::new(); let mut request = client.request(method, url); @@ -400,9 +450,65 @@ pub async fn make_http_request( let response = request.send().await?.error_for_status()?; - let response_json = response.json().await?; + // Handle empty responses (e.g. 
204 No Content from Google channels/stop) + let bytes = response.bytes().await?; + if bytes.is_empty() { + serde_json::from_str("null").map_err(HttpRequestError::Json) + } else { + serde_json::from_slice(&bytes).map_err(HttpRequestError::Json) + } +} + +#[derive(Debug)] +pub enum HttpRequestError { + Reqwest(reqwest::Error), + Json(serde_json::Error), +} + +impl std::fmt::Display for HttpRequestError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + HttpRequestError::Reqwest(e) => write!(f, "{}", e), + HttpRequestError::Json(e) => write!(f, "JSON decode error: {}", e), + } + } +} + +impl std::error::Error for HttpRequestError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + HttpRequestError::Reqwest(e) => Some(e), + HttpRequestError::Json(e) => Some(e), + } + } +} + +impl From for HttpRequestError { + fn from(e: reqwest::Error) -> Self { + HttpRequestError::Reqwest(e) + } +} + +impl HttpRequestError { + pub fn status(&self) -> Option { + match self { + HttpRequestError::Reqwest(e) => e.status(), + HttpRequestError::Json(_) => None, + } + } +} - Ok(response_json) +/// Read OAuth client_id and client_secret from instance-level global settings. +/// Used when a workspace integration has `instance_shared: true`. +async fn get_instance_oauth_credentials( + db: &DB, + service_name: ServiceName, +) -> Result<(String, String)> { + windmill_common::global_settings::get_instance_oauth_credentials( + db, + service_name.resource_type(), + ) + .await } pub async fn decrypt_oauth_data< @@ -416,75 +522,81 @@ pub async fn decrypt_oauth_data< service_name: ServiceName, ) -> Result { let integration = get_workspace_integration(tx, workspace_id, service_name).await?; + let oauth_data = integration.oauth_data; - let mc = build_crypt(db, workspace_id).await?; - let mut oauth_data: serde_json::Value = integration.oauth_data; + let resource_path = integration.resource_path.as_deref().ok_or_else(|| { + Error::InternalErr(format!( + "No resource_path in {} integration config. Please reconnect the integration.", + service_name + )) + })?; - if let Some(encrypted_access_token) = oauth_data.get("access_token").and_then(|v| v.as_str()) { - let decrypted_access_token = decrypt(&mc, encrypted_access_token.to_string()) - .map_err(|e| Error::InternalErr(format!("Failed to decrypt access token: {}", e)))?; - oauth_data["access_token"] = serde_json::Value::String(decrypted_access_token); - } + let mc = build_crypt(db, workspace_id).await?; - if let Some(encrypted_refresh_token) = oauth_data.get("refresh_token").and_then(|v| v.as_str()) + let var_row = sqlx::query!( + "SELECT value, account FROM variable WHERE workspace_id = $1 AND path = $2", + workspace_id, + resource_path, + ) + .fetch_optional(db) + .await? + .ok_or_else(|| { + Error::InternalErr(format!( + "Variable at {} not found for {} integration", + resource_path, service_name + )) + })?; + + let access_token = decrypt(&mc, var_row.value) + .map_err(|e| Error::InternalErr(format!("Failed to decrypt access token: {}", e)))?; + + let refresh_token = if let Some(account_id) = var_row.account { + sqlx::query_scalar!( + "SELECT refresh_token FROM account WHERE workspace_id = $1 AND id = $2", + workspace_id, + account_id, + ) + .fetch_optional(db) + .await? 
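+        // (refresh tokens live on the linked account row, not on the variable itself)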
+ } else { + None + }; + + let (client_id, client_secret) = if oauth_data + .get("instance_shared") + .and_then(|v| v.as_bool()) + .unwrap_or(false) { - let decrypted_refresh_token = decrypt(&mc, encrypted_refresh_token.to_string()) - .map_err(|e| Error::InternalErr(format!("Failed to decrypt refresh token: {}", e)))?; - oauth_data["refresh_token"] = serde_json::Value::String(decrypted_refresh_token); - } - - serde_json::from_value(oauth_data) - .map_err(|e| Error::InternalErr(format!("Failed to deserialize OAuth data: {}", e))) -} - -#[allow(unused)] -pub fn oauth_data_to_config(oauth_data: &serde_json::Value) -> Result { - let base_url = oauth_data - .get("base_url") - .and_then(|v| v.as_str()) - .ok_or_else(|| Error::InternalErr("No base_url in OAuth data".to_string()))? - .to_string(); - - let access_token = oauth_data - .get("access_token") - .and_then(|v| v.as_str()) - .ok_or_else(|| Error::InternalErr("No access_token in OAuth data".to_string()))? - .to_string(); - - let refresh_token = oauth_data - .get("refresh_token") - .and_then(|v| v.as_str()) - .map(|s| s.to_string()); - - let client_id = oauth_data - .get("client_id") - .and_then(|v| v.as_str()) - .ok_or_else(|| Error::InternalErr("No client_id in OAuth data".to_string()))? - .to_string(); - - let client_secret = oauth_data - .get("client_secret") - .and_then(|v| v.as_str()) - .ok_or_else(|| Error::InternalErr("No client_secret in OAuth data".to_string()))? - .to_string(); - - Ok(OAuthConfig { base_url, access_token, refresh_token, client_id, client_secret }) -} - -#[inline] -pub fn oauth_config_to_json(config: &OAuthConfig) -> serde_json::Value { - let mut json = json!({ - "base_url": config.base_url, - "access_token": config.access_token, - "client_id": config.client_id, - "client_secret": config.client_secret, + // Read credentials from instance-level global settings instead of workspace_integrations + let (id, secret) = get_instance_oauth_credentials(db, service_name) + .await + .map_err(|e| { + Error::InternalErr(format!( + "Failed to read instance OAuth credentials for {}: {}", + service_name, e + )) + })?; + (id, secret) + } else { + ( + oauth_data["client_id"].as_str().unwrap_or("").to_string(), + oauth_data["client_secret"] + .as_str() + .unwrap_or("") + .to_string(), + ) + }; + + let assembled = json!({ + "base_url": oauth_data["base_url"].as_str().unwrap_or(""), + "access_token": access_token, + "refresh_token": refresh_token, + "client_id": client_id, + "client_secret": client_secret, }); - if let Some(refresh_token) = &config.refresh_token { - json["refresh_token"] = serde_json::Value::String(refresh_token.clone()); - } - - json + serde_json::from_value(assembled) + .map_err(|e| Error::InternalErr(format!("Failed to deserialize OAuth data: {}", e))) } /// Token refresh response @@ -500,6 +612,7 @@ struct RefreshTokenResponse { pub async fn refresh_oauth_tokens( oauth_config: &OAuthConfig, refresh_endpoint: &str, + auth_endpoint: &str, ) -> Result { let refresh_token_str = oauth_config .refresh_token @@ -508,9 +621,9 @@ pub async fn refresh_oauth_tokens( // Build OAuth client for token refresh // Auth URL is not used for refresh, but required by the client constructor - let auth_url = Url::parse(&format!("{}/oauth/authorize", oauth_config.base_url)) + let auth_url = Url::parse(&resolve_endpoint(&oauth_config.base_url, auth_endpoint)) .map_err(|e| Error::InternalErr(format!("Invalid auth URL: {}", e)))?; - let token_url = Url::parse(&format!("{}{}", oauth_config.base_url, refresh_endpoint)) + let token_url = 
Url::parse(&resolve_endpoint(&oauth_config.base_url, refresh_endpoint)) .map_err(|e| Error::InternalErr(format!("Invalid token URL: {}", e)))?; let mut client = OClient::new(oauth_config.client_id.clone(), auth_url, token_url); @@ -539,62 +652,64 @@ pub async fn refresh_oauth_tokens( pub async fn refresh_oauth_tokens( _oauth_config: &OAuthConfig, _refresh_endpoint: &str, + _auth_endpoint: &str, ) -> Result { Err(Error::InternalErr( "Native triggers feature is not enabled".to_string(), )) } -async fn update_workspace_integration_tokens_helper( - db: DB, - workspace_id: String, +async fn update_oauth_token_resource( + db: &DB, + workspace_id: &str, service_name: ServiceName, - oauth_data: serde_json::Value, + new_access_token: &str, + new_refresh_token: Option<&str>, ) { let result = async { - let mut tx = db.begin().await?; - let mc = build_crypt(&db, &workspace_id).await?; - let mut encrypted_oauth_data = oauth_data; - - if let Some(access_token) = encrypted_oauth_data - .get("access_token") - .and_then(|v| v.as_str()) - { - let encrypted_access_token = encrypt(&mc, access_token); - encrypted_oauth_data["access_token"] = - serde_json::Value::String(encrypted_access_token); - } + let integration = get_workspace_integration(db, workspace_id, service_name).await?; + let resource_path = integration.resource_path.ok_or_else(|| { + Error::InternalErr(format!( + "No resource_path in {} integration config", + service_name + )) + })?; - if let Some(refresh_token) = encrypted_oauth_data - .get("refresh_token") - .and_then(|v| v.as_str()) - { - let encrypted_refresh_token = encrypt(&mc, refresh_token); - encrypted_oauth_data["refresh_token"] = - serde_json::Value::String(encrypted_refresh_token); - } + let mc = build_crypt(db, workspace_id).await?; + let encrypted_token = encrypt(&mc, new_access_token); sqlx::query!( - r#" - UPDATE workspace_integrations - SET oauth_data = $1, updated_at = now() - WHERE workspace_id = $2 AND service_name = $3 - "#, - encrypted_oauth_data, + "UPDATE variable SET value = $1 WHERE workspace_id = $2 AND path = $3", + encrypted_token, workspace_id, - service_name as ServiceName, + resource_path, ) - .execute(&mut *tx) + .execute(db) .await?; - tx.commit().await?; + if let Some(refresh_token) = new_refresh_token { + sqlx::query!( + "UPDATE account SET refresh_token = $1, refresh_error = NULL + WHERE workspace_id = $2 AND client = $3 AND is_workspace_integration = true", + refresh_token, + workspace_id, + service_name.as_str(), + ) + .execute(db) + .await?; + } + Ok::<(), Error>(()) } .await; if let Err(e) = result { - tracing::error!("Critical error: Failed to update workspace integration tokens for {} in workspace {}: {}", - service_name, workspace_id, e); + tracing::error!( + "Failed to update OAuth tokens for {} in workspace {}: {}", + service_name, + workspace_id, + e + ); } } @@ -939,6 +1054,7 @@ pub async fn store_workspace_integration( workspace_id: &str, service_name: ServiceName, oauth_data: serde_json::Value, + resource_path: Option<&str>, ) -> Result<()> { sqlx::query!( r#" @@ -946,20 +1062,23 @@ pub async fn store_workspace_integration( workspace_id, service_name, oauth_data, + resource_path, created_by, created_at, updated_at ) VALUES ( - $1, $2, $3, $4, now(), now() + $1, $2, $3, $4, $5, now(), now() ) ON CONFLICT (workspace_id, service_name) DO UPDATE SET oauth_data = $3, + resource_path = $4, updated_at = now() "#, workspace_id, service_name as ServiceName, oauth_data, + resource_path, authed.username, ) .execute(&mut *tx) @@ -980,6 +1099,7 @@ pub async fn 
get_workspace_integration<'c, E: sqlx::Executor<'c, Database = Post workspace_id, service_name AS "service_name!: ServiceName", oauth_data, + resource_path, created_at, updated_at, created_by @@ -1051,3 +1171,43 @@ pub fn generate_webhook_service_url( url } + +/// Process incoming webhook request for a native trigger service. +/// Dispatches to the service-specific `prepare_webhook` to transform headers/body into args. +/// Returns `None` if the service doesn't need special processing (standard body parsing is used). +#[cfg(feature = "native_trigger")] +pub async fn prepare_native_trigger_args( + service_name: ServiceName, + db: &DB, + w_id: &str, + headers: &http::HeaderMap, + body: String, +) -> Result> { + let headers_map: HashMap = headers + .iter() + .filter_map(|(k, v)| v.to_str().ok().map(|v| (k.to_string(), v.to_string()))) + .collect(); + + match service_name { + ServiceName::Google => { + let handler = google::Google; + let args = handler + .prepare_webhook(db, w_id, headers_map, body, "", false) + .await?; + Ok(Some(args)) + } + ServiceName::Nextcloud => Ok(None), + } +} + +/// Fallback when native_trigger feature is disabled +#[cfg(not(feature = "native_trigger"))] +pub async fn prepare_native_trigger_args( + _service_name: ServiceName, + _db: &DB, + _w_id: &str, + _headers: &http::HeaderMap, + _body: String, +) -> Result> { + Ok(None) +} diff --git a/backend/windmill-native-triggers/src/nextcloud/external.rs b/backend/windmill-native-triggers/src/nextcloud/external.rs index 54b75782293e0..3a4bf3b0f9a5e 100644 --- a/backend/windmill-native-triggers/src/nextcloud/external.rs +++ b/backend/windmill-native-triggers/src/nextcloud/external.rs @@ -102,6 +102,7 @@ impl External for NextCloud { const SUPPORT_WEBHOOK: bool = true; const TOKEN_ENDPOINT: &'static str = "/apps/oauth2/api/v1/token"; const REFRESH_ENDPOINT: &'static str = "/apps/oauth2/api/v1/token"; + const AUTH_ENDPOINT: &'static str = "/oauth/authorize"; async fn create( &self, @@ -148,7 +149,7 @@ impl External for NextCloud { data: &NativeTriggerData, db: &DB, tx: &mut PgConnection, - ) -> Result<()> { + ) -> Result { // During update, we have the external_id so include it in the webhook URL let full_nextcloud_payload = FullNextcloudPayload::new(w_id, Some(external_id), webhook_token, data).await; @@ -173,7 +174,11 @@ impl External for NextCloud { ) .await?; - Ok(()) + // Fetch back the updated state and convert to JSON config + let trigger_data = self.get(w_id, oauth_data, external_id, db, tx).await?; + serde_json::to_value(&trigger_data).map_err(|e| { + Error::internal_err(format!("Failed to convert trigger data to JSON: {}", e)) + }) } async fn get( @@ -257,13 +262,94 @@ impl External for NextCloud { Ok(true) } + async fn maintain_triggers( + &self, + db: &DB, + workspace_id: &str, + triggers: &[crate::NativeTrigger], + oauth_data: &Self::OAuthData, + synced: &mut Vec, + errors: &mut Vec, + ) { + let mut tx = match db.begin().await { + Ok(tx) => tx, + Err(e) => { + errors.push(crate::sync::SyncError { + resource_path: format!("workspace:{}", workspace_id), + error_message: format!("Failed to begin transaction: {}", e), + error_type: "database_error".to_string(), + }); + return; + } + }; + + let external_triggers = match self.list_all(workspace_id, oauth_data, db, &mut tx).await { + Ok(triggers) => triggers, + Err(e) => { + tracing::error!( + "Failed to fetch external triggers for {}: {}", + workspace_id, + e + ); + errors.push(crate::sync::SyncError { + resource_path: format!("workspace:{}", workspace_id), + 
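+                // resource_path covers the whole workspace here: list_all failed
+                // before any individual trigger could be inspected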
error_message: format!("Failed to fetch external triggers: {}", e), + error_type: "external_service_error".to_string(), + }); + return; + } + }; + + if let Err(e) = tx.commit().await { + errors.push(crate::sync::SyncError { + resource_path: format!("workspace:{}", workspace_id), + error_message: format!("Failed to commit transaction: {}", e), + error_type: "database_error".to_string(), + }); + return; + } + + // Convert to (external_id, config_json) pairs for reconciliation + let external_pairs: Vec<(String, serde_json::Value)> = external_triggers + .iter() + .filter_map(|data| { + let config = serde_json::to_value(data).ok()?; + Some((data.id.to_string(), config)) + }) + .collect(); + + crate::sync::reconcile_with_external_state( + db, + workspace_id, + ServiceName::Nextcloud, + triggers, + &external_pairs, + synced, + errors, + ) + .await; + } + + fn external_id_and_metadata_from_response( + &self, + resp: &Self::CreateResponse, + ) -> (String, Option) { + (resp.id.to_string(), None) + } + + fn additional_routes(&self) -> axum::Router { + routes::nextcloud_routes(self.clone()) + } +} + +impl NextCloud { async fn list_all( &self, w_id: &str, - oauth_data: &Self::OAuthData, + oauth_data: &NextCloudOAuthData, db: &DB, tx: &mut PgConnection, - ) -> Result> { + ) -> Result> { let url = format!( "{}/ocs/v2.php/apps/webhook_listeners/api/v1/webhooks", oauth_data.base_url @@ -286,19 +372,4 @@ impl External for NextCloud { Ok(ocs_response.ocs.data) } - - fn external_id_and_metadata_from_response( - &self, - resp: &Self::CreateResponse, - ) -> (String, Option) { - (resp.id.to_string(), None) - } - - fn get_external_id_from_trigger_data(&self, data: &Self::TriggerData) -> String { - data.id.to_string() - } - - fn additional_routes(&self) -> axum::Router { - routes::nextcloud_routes(self.clone()) - } } diff --git a/backend/windmill-native-triggers/src/nextcloud/routes.rs b/backend/windmill-native-triggers/src/nextcloud/routes.rs index 29534b9642620..69bc3cf0fd73d 100644 --- a/backend/windmill-native-triggers/src/nextcloud/routes.rs +++ b/backend/windmill-native-triggers/src/nextcloud/routes.rs @@ -11,7 +11,7 @@ use windmill_common::{ use crate::{ get_workspace_integration, nextcloud::{NextCloudEventType, OcsResponse}, - External, OAuthConfig, ServiceName, + External, ServiceName, }; use windmill_api_auth::ApiAuthed; @@ -26,12 +26,15 @@ async fn list_available_events( let integration = get_workspace_integration(&mut *tx, &workspace_id, ServiceName::Nextcloud).await?; - let auth = serde_json::from_value::(integration.oauth_data) - .map_err(|e| Error::InternalErr(format!("Failed to parse NextCloud OAuth data: {}", e)))?; + let base_url = integration + .oauth_data + .get("base_url") + .and_then(|v| v.as_str()) + .unwrap_or(""); let url = format!( "{}/ocs/v2.php/apps/integration_windmill/api/v1/list/events", - &auth.base_url, + base_url, ); let mut headers = HashMap::new(); diff --git a/backend/windmill-native-triggers/src/sync.rs b/backend/windmill-native-triggers/src/sync.rs index 6e7b70c0984ff..88578551c4554 100644 --- a/backend/windmill-native-triggers/src/sync.rs +++ b/backend/windmill-native-triggers/src/sync.rs @@ -9,7 +9,7 @@ use crate::ServiceName; #[cfg(feature = "native_trigger")] use crate::{ decrypt_oauth_data, list_native_triggers, update_native_trigger_error, - update_native_trigger_service_config, External, + update_native_trigger_service_config, External, NativeTrigger, }; #[derive(Debug, Serialize)] @@ -60,19 +60,20 @@ pub async fn sync_all_triggers(db: &DB) -> Result { // Each service 
only syncs workspaces that have the corresponding integration configured #[cfg(feature = "native_trigger")] { + use crate::google::Google; use crate::nextcloud::NextCloud; + // Nextcloud sync let (service_name, result) = sync_service_triggers(db, NextCloud).await; total_synced += result.synced_triggers.len(); total_errors += result.errors.len(); service_results.insert(service_name, result); - // Add new services here: - // use crate::newservice::NewService; - // let (service_name, result) = sync_service_triggers(db, NewService).await; - // total_synced += result.synced_triggers.len(); - // total_errors += result.errors.len(); - // service_results.insert(service_name, result); + // Google sync (handles both Drive and Calendar triggers) + let (service_name, result) = sync_service_triggers(db, Google).await; + total_synced += result.synced_triggers.len(); + total_errors += result.errors.len(); + service_results.insert(service_name, result); } // Count unique workspaces processed across all services @@ -105,6 +106,9 @@ async fn sync_service_triggers( let mut all_synced_triggers = Vec::new(); let mut all_errors = Vec::new(); + // Use the integration service for lookup (e.g., GoogleDrive/GoogleCalendar -> Google) + let integration_service = T::SERVICE_NAME.integration_service(); + // Only sync workspaces that have the corresponding integration configured let workspaces_with_integration = match sqlx::query_scalar!( r#" @@ -115,7 +119,7 @@ async fn sync_service_triggers( AND wi.oauth_data IS NOT NULL AND w.deleted = false "#, - T::SERVICE_NAME as ServiceName + integration_service as ServiceName ) .fetch_all(db) .await @@ -210,11 +214,14 @@ pub async fn sync_workspace_triggers( return Ok((Vec::new(), Vec::new())); } - let mut all_synced_triggers = Vec::new(); - let mut all_sync_errors = Vec::new(); + let mut synced = Vec::new(); + let mut errors = Vec::new(); + + // Use the integration service for OAuth lookup (e.g., GoogleDrive/GoogleCalendar -> Google) + let integration_service = T::SERVICE_NAME.integration_service(); let oauth_data = { - match decrypt_oauth_data(db, db, workspace_id, T::SERVICE_NAME).await { + match decrypt_oauth_data(db, db, workspace_id, integration_service).await { Ok(oauth_data) => oauth_data, Err(e) => { tracing::error!( @@ -222,46 +229,58 @@ pub async fn sync_workspace_triggers( workspace_id, e ); - all_sync_errors.push(SyncError { + errors.push(SyncError { resource_path: format!("workspace:{}", workspace_id), error_message: format!("Failed to get workspace integration OAuth data: {}", e), error_type: "oauth_error".to_string(), }); - return Ok((Vec::new(), all_sync_errors)); + return Ok((Vec::new(), errors)); } } }; - let mut tx = db.begin().await?; - let external_triggers = match handler - .list_all(workspace_id, &oauth_data, db, &mut tx) - .await - { - Ok(triggers) => triggers, - Err(e) => { - tracing::error!( - "Failed to fetch external triggers for {}: {}", - workspace_id, - e - ); - all_sync_errors.push(SyncError { - resource_path: format!("workspace:{}", workspace_id), - error_message: format!("Failed to fetch external triggers: {}", e), - error_type: "external_service_error".to_string(), - }); - return Ok((Vec::new(), all_sync_errors)); - } - }; - tx.commit().await?; + handler + .maintain_triggers( + db, + workspace_id, + &windmill_triggers, + &oauth_data, + &mut synced, + &mut errors, + ) + .await; + + tracing::info!( + "Sync completed for {} in workspace '{}'. 
Updated: {}, Errors: {}", + T::SERVICE_NAME.as_str(), + workspace_id, + synced.len(), + errors.len() + ); - // Build a map of external trigger IDs to their data - let mut external_trigger_map: HashMap = HashMap::new(); - for external_trigger in &external_triggers { - let external_id = handler.get_external_id_from_trigger_data(external_trigger); - external_trigger_map.insert(external_id, external_trigger); + Ok((synced, errors)) +} + +/// Reusable reconciliation logic for services with real external state (e.g. Nextcloud). +/// Compares external triggers with DB triggers: sets errors for missing ones, +/// clears errors and updates config for existing ones. +#[cfg(feature = "native_trigger")] +pub async fn reconcile_with_external_state( + db: &DB, + workspace_id: &str, + service_name: ServiceName, + windmill_triggers: &[NativeTrigger], + external_triggers: &[(String, serde_json::Value)], + synced: &mut Vec, + errors: &mut Vec, +) { + // Build a map of external trigger IDs to their config + let mut external_trigger_map: HashMap = HashMap::new(); + for (external_id, config) in external_triggers { + external_trigger_map.insert(external_id.clone(), config); } - for trigger in &windmill_triggers { + for trigger in windmill_triggers { if !external_trigger_map.contains_key(&trigger.external_id) { // Trigger no longer exists on external service - set error let error_msg = "Trigger no longer exists on external service".to_string(); @@ -276,14 +295,14 @@ pub async fn sync_workspace_triggers( match update_native_trigger_error( db, workspace_id, - T::SERVICE_NAME, + service_name, &trigger.external_id, Some(&error_msg), ) .await { Ok(()) => { - all_synced_triggers.push(TriggerSyncInfo { + synced.push(TriggerSyncInfo { external_id: trigger.external_id.clone(), script_path: trigger.script_path.clone(), action: SyncAction::ErrorSet(error_msg), @@ -295,7 +314,7 @@ pub async fn sync_workspace_triggers( trigger.external_id, e ); - all_sync_errors.push(SyncError { + errors.push(SyncError { resource_path: format!("workspace:{}", workspace_id), error_message: format!( "Failed to update error for trigger (external_id: '{}'): {}", @@ -308,7 +327,7 @@ pub async fn sync_workspace_triggers( } } else { // Trigger exists on external service - let external_trigger_data = external_trigger_map.get(&trigger.external_id).unwrap(); + let external_service_config = external_trigger_map.get(&trigger.external_id).unwrap(); // Clear error if it was set if trigger.error.is_some() { @@ -321,14 +340,14 @@ pub async fn sync_workspace_triggers( match update_native_trigger_error( db, workspace_id, - T::SERVICE_NAME, + service_name, &trigger.external_id, None, ) .await { Ok(()) => { - all_synced_triggers.push(TriggerSyncInfo { + synced.push(TriggerSyncInfo { external_id: trigger.external_id.clone(), script_path: trigger.script_path.clone(), action: SyncAction::ErrorCleared, @@ -340,7 +359,7 @@ pub async fn sync_workspace_triggers( trigger.external_id, e ); - all_sync_errors.push(SyncError { + errors.push(SyncError { resource_path: format!("workspace:{}", workspace_id), error_message: format!( "Failed to clear error for trigger (external_id: '{}'): {}", @@ -353,14 +372,12 @@ pub async fn sync_workspace_triggers( } // Compare service_config and update if different - let external_service_config = - handler.extract_service_config_from_trigger_data(external_trigger_data)?; let stored_service_config = trigger .service_config .clone() .unwrap_or(serde_json::Value::Null); - if external_service_config != stored_service_config { + if 
**external_service_config != stored_service_config { tracing::info!( "Trigger (external_id: '{}', script_path: '{}') config differs from external service, updating local config", trigger.external_id, @@ -370,14 +387,14 @@ pub async fn sync_workspace_triggers( match update_native_trigger_service_config( db, workspace_id, - T::SERVICE_NAME, + service_name, &trigger.external_id, - &external_service_config, + external_service_config, ) .await { Ok(()) => { - all_synced_triggers.push(TriggerSyncInfo { + synced.push(TriggerSyncInfo { external_id: trigger.external_id.clone(), script_path: trigger.script_path.clone(), action: SyncAction::ConfigUpdated, @@ -389,7 +406,7 @@ pub async fn sync_workspace_triggers( trigger.external_id, e ); - all_sync_errors.push(SyncError { + errors.push(SyncError { resource_path: format!("workspace:{}", workspace_id), error_message: format!( "Failed to update config for trigger (external_id: '{}'): {}", @@ -408,14 +425,4 @@ pub async fn sync_workspace_triggers( } } } - - tracing::info!( - "Sync completed for {} in workspace '{}'. Updated: {}, Errors: {}", - T::SERVICE_NAME.as_str(), - workspace_id, - all_synced_triggers.len(), - all_sync_errors.len() - ); - - Ok((all_synced_triggers, all_sync_errors)) } diff --git a/backend/windmill-native-triggers/src/workspace_integrations.rs b/backend/windmill-native-triggers/src/workspace_integrations.rs index 3891905411870..a807d982e0780 100644 --- a/backend/windmill-native-triggers/src/workspace_integrations.rs +++ b/backend/windmill-native-triggers/src/workspace_integrations.rs @@ -23,6 +23,7 @@ use windmill_audit::{audit_oss::audit_log, ActionKind}; use windmill_common::{ db::UserDB, error::{Error, JsonResult, Result}, + global_settings::{load_value_from_global_settings, OAUTH_SETTING}, utils::require_admin, variables::{build_crypt, encrypt}, DB, @@ -32,10 +33,10 @@ use windmill_common::{ use windmill_api_auth::ApiAuthed; #[cfg(feature = "native_trigger")] -use crate::ServiceName; - -#[cfg(feature = "native_trigger")] -use crate::{delete_workspace_integration, store_workspace_integration}; +use crate::{ + decrypt_oauth_data, delete_token_by_prefix, delete_workspace_integration, resolve_endpoint, + store_workspace_integration, ServiceName, +}; #[cfg(feature = "native_trigger")] use windmill_oauth::{OClient, Url, OAUTH_HTTP_CLIENT}; @@ -45,6 +46,8 @@ use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine}; #[cfg(feature = "native_trigger")] use hmac::{Hmac, Mac}; #[cfg(feature = "native_trigger")] +use serde_json::json; +#[cfg(feature = "native_trigger")] use sha2::Sha256; #[cfg(feature = "native_trigger")] @@ -173,10 +176,14 @@ pub struct ConnectIntegrationResponse { #[cfg(feature = "native_trigger")] #[derive(FromRow, Debug, Clone, Serialize, Deserialize)] pub struct WorkspaceOAuthConfig { + #[serde(default)] pub client_id: String, + #[serde(default)] pub client_secret: String, + #[serde(default)] pub base_url: String, - pub access_token: Option, + #[serde(default)] + pub instance_shared: bool, } #[cfg(feature = "native_trigger")] @@ -201,20 +208,98 @@ async fn generate_connect_url( // Generate a signed state that is cluster-safe let state = generate_signed_state(&db, &workspace_id, service_name).await?; - let auth_url = build_authorization_url(&oauth_config, &state, &redirect_uri); + let auth_url = build_authorization_url(&oauth_config, service_name, &state, &redirect_uri); Ok(Json(auth_url)) } +#[cfg(feature = "native_trigger")] +#[derive(Debug, Deserialize)] +struct BasicOAuthData { + base_url: String, + 
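+    // minimal OAuth fields needed for best-effort webhook cleanup on the remote Nextcloud instance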
access_token: String, +} + +#[cfg(feature = "native_trigger")] +async fn try_delete_nextcloud_webhook(base_url: &str, access_token: &str, external_id: &str) { + let url = format!( + "{}/ocs/v2.php/apps/webhook_listeners/api/v1/webhooks/{}", + base_url, external_id + ); + let client = reqwest::Client::new(); + let _ = client + .delete(&url) + .bearer_auth(access_token) + .header("OCS-APIRequest", "true") + .send() + .await; +} + +/// Delete all native triggers for a workspace+service, including remote webhook cleanup. +/// This is best-effort: errors during remote cleanup or token deletion are logged but ignored. +#[cfg(feature = "native_trigger")] +async fn delete_triggers_for_service(db: &DB, workspace_id: &str, service_name: ServiceName) { + let triggers = sqlx::query!( + "SELECT external_id, webhook_token_prefix FROM native_trigger WHERE workspace_id = $1 AND service_name = $2", + workspace_id, + service_name as ServiceName + ) + .fetch_all(db) + .await + .unwrap_or_default(); + + if triggers.is_empty() { + return; + } + + // For Nextcloud: try to delete webhooks on the remote instance (best-effort) + if service_name == ServiceName::Nextcloud { + if let Ok(oauth_data) = + decrypt_oauth_data::<_, BasicOAuthData>(db, db, workspace_id, service_name).await + { + for trigger in &triggers { + try_delete_nextcloud_webhook( + &oauth_data.base_url, + &oauth_data.access_token, + &trigger.external_id, + ) + .await; + } + } + } + // For Google: skip remote cleanup (watch channels expire naturally) + + // Bulk delete all triggers + let _ = sqlx::query!( + "DELETE FROM native_trigger WHERE workspace_id = $1 AND service_name = $2", + workspace_id, + service_name as ServiceName + ) + .execute(db) + .await; + + // Delete all associated webhook tokens + for trigger in &triggers { + let _ = delete_token_by_prefix(db, &trigger.webhook_token_prefix).await; + } +} + #[cfg(feature = "native_trigger")] async fn delete_integration( authed: ApiAuthed, + Extension(db): Extension, Extension(user_db): Extension, Path((workspace_id, service_name)): Path<(String, ServiceName)>, ) -> JsonResult { require_admin(authed.is_admin, &workspace_id)?; + // Delete triggers first (needs OAuth data that cleanup_oauth_resource will remove) + delete_triggers_for_service(&db, &workspace_id, service_name).await; + let mut tx = user_db.begin(&authed).await?; + // Clean up account+variable+resource + cleanup_oauth_resource(&mut *tx, &workspace_id, service_name).await; + let deleted = delete_workspace_integration(&mut *tx, &workspace_id, service_name).await?; if !deleted { @@ -248,6 +333,7 @@ async fn delete_integration( struct WorkspaceIntegrations { service_name: ServiceName, oauth_data: Option>, + resource_path: Option, } #[cfg(feature = "native_trigger")] @@ -263,8 +349,9 @@ async fn list_integrations( WorkspaceIntegrations, r#" SELECT - oauth_data as "oauth_data!: sqlx::types::Json", - service_name as "service_name!: ServiceName" + oauth_data as "oauth_data: sqlx::types::Json", + service_name as "service_name!: ServiceName", + resource_path FROM workspace_integrations WHERE @@ -277,14 +364,23 @@ async fn list_integrations( let key_value = integrations .into_iter() - .map(|integration| (integration.service_name, integration.oauth_data)) + .map(|integration| { + ( + integration.service_name, + (integration.oauth_data, integration.resource_path), + ) + }) .collect::>(); use strum::IntoEnumIterator; let integrations = ServiceName::iter() - .map(|service_name| WorkspaceIntegrations { - service_name: service_name, - oauth_data: 
key_value.get(&service_name).cloned().flatten(), + .map(|service_name| { + let (oauth_data, resource_path) = key_value + .get(&service_name) + .cloned() + .map(|(od, rp)| (od, rp)) + .unwrap_or((None, None)); + WorkspaceIntegrations { service_name, oauth_data, resource_path } }) .collect::>(); @@ -304,10 +400,10 @@ async fn integration_exist( r#" SELECT EXISTS ( SELECT 1 - FROM workspace_integrations - WHERE workspace_id = $1 - AND service_name = $2 - AND oauth_data IS NOT NULL + FROM workspace_integrations wi + WHERE wi.workspace_id = $1 + AND wi.service_name = $2 + AND wi.oauth_data IS NOT NULL ) "#, workspace_id, @@ -326,18 +422,26 @@ struct RedirectUri { redirect_uri: String, } +#[cfg(feature = "native_trigger")] +#[derive(Debug, Deserialize)] +struct OAuthCallbackBody { + redirect_uri: String, + code: String, + state: String, + resource_path: Option, +} + #[cfg(feature = "native_trigger")] async fn oauth_callback( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Path((workspace_id, service_name, code, state)): Path<(String, ServiceName, String, String)>, - Json(RedirectUri { redirect_uri }): Json, + Path((workspace_id, service_name)): Path<(String, ServiceName)>, + Json(body): Json, ) -> JsonResult { require_admin(authed.is_admin, &workspace_id)?; - // Validate the signed state (cluster-safe, no DB storage needed) - let state_was_valid = validate_signed_state(&db, &state, &workspace_id).await?; + let state_was_valid = validate_signed_state(&db, &body.state, &workspace_id).await?; if !state_was_valid { return Err(Error::BadRequest( @@ -345,31 +449,122 @@ async fn oauth_callback( )); } - let oauth_config = - get_workspace_oauth_config::(&db, &workspace_id, service_name) - .await?; + // Check if this integration uses instance-shared credentials + let existing_oauth_data = sqlx::query_scalar!( + r#"SELECT oauth_data FROM workspace_integrations + WHERE workspace_id = $1 AND service_name = $2"#, + workspace_id, + service_name as ServiceName + ) + .fetch_optional(&db) + .await? + .flatten(); + + let is_instance_shared = existing_oauth_data + .as_ref() + .and_then(|v| v.get("instance_shared")) + .and_then(|v| v.as_bool()) + .unwrap_or(false); + + let oauth_config = if is_instance_shared { + get_instance_oauth_config(&db, service_name).await? + } else { + get_workspace_oauth_config::(&db, &workspace_id, service_name).await? + }; let token_response = - exchange_code_for_token(&oauth_config, service_name, &code, &redirect_uri).await?; + exchange_code_for_token(&oauth_config, service_name, &body.code, &body.redirect_uri) + .await?; + + let resource_path = body + .resource_path + .filter(|p| !p.is_empty()) + .unwrap_or_else(|| { + format!( + "u/{}/native_{}", + authed.username, + service_name.resource_type() + ) + }); + + let expires_in = token_response.expires_in.unwrap_or(3600); let mut tx = user_db.begin(&authed).await?; - let mc = build_crypt(&db, &workspace_id).await?; - let mut oauth_data = serde_json::to_value(oauth_config).unwrap(); + // Clean up any previous account+variable+resource for this integration + cleanup_oauth_resource(&mut *tx, &workspace_id, service_name).await; - let encrypted_access_token = encrypt(&mc, &token_response.access_token); - oauth_data["access_token"] = serde_json::Value::String(encrypted_access_token); + // 1. 
Create account record for token refresh + let account_id = sqlx::query_scalar!( + "INSERT INTO account (workspace_id, client, expires_at, refresh_token, is_workspace_integration) + VALUES ($1, $2, now() + ($3 || ' seconds')::interval, $4, true) + RETURNING id", + workspace_id, + service_name.as_str(), + expires_in.to_string(), + token_response.refresh_token.as_deref().unwrap_or(""), + ) + .fetch_one(&mut *tx) + .await + .map_err(|e| Error::InternalErr(format!("Failed to create account: {}", e)))?; - if let Some(refresh_token) = token_response.refresh_token { - let encrypted_refresh_token = encrypt(&mc, &refresh_token); - oauth_data["refresh_token"] = serde_json::Value::String(encrypted_refresh_token); - } - if let Some(expires_in) = token_response.expires_in { - let expires_at = chrono::Utc::now() + chrono::Duration::seconds(expires_in as i64); - oauth_data["token_expires_at"] = serde_json::Value::String(expires_at.to_rfc3339()); - } + // 2. Create variable with encrypted access token + let mc = build_crypt(&db, &workspace_id).await?; + let encrypted_access_token = encrypt(&mc, &token_response.access_token); - store_workspace_integration(&mut *tx, &authed, &workspace_id, service_name, oauth_data).await?; + sqlx::query!( + "INSERT INTO variable (workspace_id, path, value, is_secret, description, account, is_oauth) + VALUES ($1, $2, $3, true, $4, $5, true) + ON CONFLICT (workspace_id, path) DO UPDATE + SET value = EXCLUDED.value, account = EXCLUDED.account", + workspace_id, + resource_path, + encrypted_access_token, + format!("OAuth token for {} workspace integration", service_name), + account_id, + ) + .execute(&mut *tx) + .await + .map_err(|e| Error::InternalErr(format!("Failed to create variable: {}", e)))?; + + // 3. Create resource pointing to the variable + let resource_value = json!({ "token": format!("$var:{}", resource_path) }); + + sqlx::query!( + "INSERT INTO resource (workspace_id, path, value, resource_type, description, created_by) + VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (workspace_id, path) DO UPDATE + SET value = EXCLUDED.value, resource_type = EXCLUDED.resource_type", + workspace_id, + resource_path, + resource_value, + service_name.resource_type(), + format!("{} workspace integration", service_name), + authed.username, + ) + .execute(&mut *tx) + .await + .map_err(|e| Error::InternalErr(format!("Failed to create resource: {}", e)))?; + + // 4. Store config + resource_path in workspace_integrations (no tokens). + // For instance-shared integrations, store the flag instead of credentials. 
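+    // Illustrative shapes: instance-shared stores only {"instance_shared": true, "base_url": ""};
+    // workspace-owned stores the full WorkspaceOAuthConfig JSON (client_id/client_secret/base_url).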
+ let stored_data = if is_instance_shared { + json!({ + "instance_shared": true, + "base_url": "", + }) + } else { + to_value(&oauth_config).unwrap() + }; + store_workspace_integration( + &mut *tx, + &authed, + &workspace_id, + service_name, + stored_data, + Some(&resource_path), + ) + .await?; audit_log( &mut *tx, @@ -407,16 +602,14 @@ fn build_native_oauth_client( service_name: ServiceName, redirect_uri: &str, ) -> Result { - let auth_url = Url::parse(&format!( - "{}{}", - config.base_url, - service_name.auth_endpoint() + let auth_url = Url::parse(&resolve_endpoint( + &config.base_url, + service_name.auth_endpoint(), )) .map_err(|e| Error::InternalErr(format!("Invalid auth URL: {}", e)))?; - let token_url = Url::parse(&format!( - "{}{}", - config.base_url, - service_name.token_endpoint() + let token_url = Url::parse(&resolve_endpoint( + &config.base_url, + service_name.token_endpoint(), )) .map_err(|e| Error::InternalErr(format!("Invalid token URL: {}", e)))?; let redirect = Url::parse(redirect_uri).map_err(|e| { @@ -459,7 +652,7 @@ async fn get_workspace_oauth_config( workspace_id: &str, service_name: ServiceName, ) -> Result { - let oauth_configs = sqlx::query_scalar!( + let oauth_data = sqlx::query_scalar!( r#" SELECT oauth_data @@ -474,15 +667,14 @@ async fn get_workspace_oauth_config( ) .fetch_optional(db) .await? + .flatten() .ok_or(Error::NotFound(format!( "Integration for service {} not found", service_name.as_str() )))?; - let config = serde_json::from_value::(oauth_configs) - .map_err(|e| Error::InternalErr(format!("Failed to parse OAuth config: {}", e)))?; - - Ok(config) + serde_json::from_value::(oauth_data) + .map_err(|e| Error::InternalErr(format!("Failed to parse OAuth config: {}", e))) } #[cfg(feature = "native_trigger")] @@ -502,6 +694,7 @@ pub async fn create_workspace_integration( &workspace_id, service_name, to_value(oauth_data).unwrap(), + None, ) .await?; @@ -523,24 +716,193 @@ async fn get_workspace_oauth_config_as_oauth_config( #[cfg(feature = "native_trigger")] fn build_authorization_url( config: &WorkspaceOAuthConfig, + service_name: ServiceName, state: &str, redirect_uri: &str, ) -> String { - let params = [ + let base_auth_url = resolve_endpoint(&config.base_url, service_name.auth_endpoint()); + + let mut params = vec![ ("response_type", "code"), - ("client_id", &config.client_id), + ("client_id", config.client_id.as_str()), ("redirect_uri", redirect_uri), ("state", state), - ("scope", "read write"), + ("scope", service_name.oauth_scopes()), ]; + for &(key, value) in service_name.extra_auth_params() { + params.push((key, value)); + } + let query_string = params .iter() .map(|(k, v)| format!("{}={}", urlencoding::encode(k), urlencoding::encode(v))) .collect::>() .join("&"); - format!("{}/apps/oauth2/authorize?{}", config.base_url, query_string) + format!("{}?{}", base_auth_url, query_string) +} + +#[cfg(feature = "native_trigger")] +pub async fn cleanup_oauth_resource( + tx: &mut sqlx::PgConnection, + workspace_id: &str, + service_name: ServiceName, +) { + // Look up the stored resource_path from workspace_integrations + let stored_resource_path: Option = sqlx::query_scalar!( + r#"SELECT resource_path FROM workspace_integrations WHERE workspace_id = $1 AND service_name = $2"#, + workspace_id, + service_name as ServiceName, + ) + .fetch_optional(&mut *tx) + .await + .ok() + .flatten() + .flatten(); + + // Find and delete any existing account+variable+resource for this integration + let account_ids: Vec = sqlx::query_scalar!( + "DELETE FROM account WHERE 
workspace_id = $1 AND client = $2 AND is_workspace_integration = true RETURNING id", + workspace_id, + service_name.as_str(), + ) + .fetch_all(&mut *tx) + .await + .unwrap_or_default(); + + if !account_ids.is_empty() { + // Delete variables linked to these accounts + let _ = sqlx::query!( + "DELETE FROM variable WHERE workspace_id = $1 AND account = ANY($2)", + workspace_id, + &account_ids, + ) + .execute(&mut *tx) + .await; + } + + // Delete resource by exact stored path, or fall back to legacy pattern + let resource_type = service_name.resource_type(); + if let Some(ref path) = stored_resource_path { + let _ = sqlx::query!( + "DELETE FROM resource WHERE workspace_id = $1 AND path = $2", + workspace_id, + path, + ) + .execute(&mut *tx) + .await; + } else { + // Legacy fallback for integrations created before user-chosen paths + let _ = sqlx::query!( + "DELETE FROM resource WHERE workspace_id = $1 AND resource_type = $2 AND path LIKE 'u/%/native_%'", + workspace_id, + resource_type, + ) + .execute(&mut *tx) + .await; + } +} + +/// Check if the instance admin has enabled sharing of OAuth credentials for a given service. +/// Currently only supported for Google (gworkspace). +#[cfg(feature = "native_trigger")] +async fn is_instance_sharing_enabled(db: &DB, service_name: ServiceName) -> Result { + // Only Google supports instance sharing for now + if service_name != ServiceName::Google { + return Ok(false); + } + + let oauths_value = match load_value_from_global_settings(db, OAUTH_SETTING).await? { + Some(v) => v, + None => return Ok(false), + }; + + let key = service_name.resource_type(); // "gworkspace" + let entry = match oauths_value.get(key) { + Some(v) => v, + None => return Ok(false), + }; + + let id = entry.get("id").and_then(|v| v.as_str()).unwrap_or(""); + let secret = entry.get("secret").and_then(|v| v.as_str()).unwrap_or(""); + let share = entry + .get("share_with_workspaces") + .and_then(|v| v.as_bool()) + .unwrap_or(false); + + Ok(!id.is_empty() && !secret.is_empty() && share) +} + +/// Read instance-level OAuth credentials for a service (when sharing is enabled). +#[cfg(feature = "native_trigger")] +async fn get_instance_oauth_config( + db: &DB, + service_name: ServiceName, +) -> Result { + if !is_instance_sharing_enabled(db, service_name).await? 
{ + return Err(Error::BadRequest( + "Instance credential sharing is not enabled for this service".to_string(), + )); + } + + let (client_id, client_secret) = + windmill_common::global_settings::get_instance_oauth_credentials( + db, + service_name.resource_type(), + ) + .await?; + + Ok(WorkspaceOAuthConfig { + client_id, + client_secret, + base_url: String::new(), // Google uses absolute URLs + instance_shared: false, + }) +} + +#[cfg(feature = "native_trigger")] +async fn check_instance_sharing_available( + authed: ApiAuthed, + Extension(db): Extension, + Path((workspace_id, service_name)): Path<(String, ServiceName)>, +) -> JsonResult { + require_admin(authed.is_admin, &workspace_id)?; + let available = is_instance_sharing_enabled(&db, service_name).await?; + Ok(Json(available)) +} + +#[cfg(feature = "native_trigger")] +async fn generate_instance_connect_url( + authed: ApiAuthed, + Extension(db): Extension, + Extension(user_db): Extension, + Path((workspace_id, service_name)): Path<(String, ServiceName)>, + Json(RedirectUri { redirect_uri }): Json, +) -> JsonResult { + require_admin(authed.is_admin, &workspace_id)?; + + let instance_config = get_instance_oauth_config(&db, service_name).await?; + + // Store a marker in workspace_integrations — NOT the actual credentials. + // The callback and token refresh will read credentials from global settings + // when they see instance_shared=true. + let mut tx = user_db.begin(&authed).await?; + crate::store_workspace_integration( + &mut tx, + &authed, + &workspace_id, + service_name, + json!({ "instance_shared": true, "base_url": "" }), + None, + ) + .await?; + tx.commit().await?; + + // Generate signed state and build authorization URL + let state = generate_signed_state(&db, &workspace_id, service_name).await?; + let auth_url = build_authorization_url(&instance_config, service_name, &state, &redirect_uri); + Ok(Json(auth_url)) } #[cfg(feature = "native_trigger")] @@ -553,8 +915,16 @@ pub fn workspaced_service() -> Router { "/:service_name/generate_connect_url", post(generate_connect_url), ) + .route( + "/:service_name/instance_sharing_available", + get(check_instance_sharing_available), + ) + .route( + "/:service_name/generate_instance_connect_url", + post(generate_instance_connect_url), + ) .route("/:service_name/delete", delete(delete_integration)) - .route("/:service_name/callback/:code/:state", post(oauth_callback)); + .route("/:service_name/callback", post(oauth_callback)); Router::new().nest("/integrations", router) } diff --git a/backend/windmill-store/src/resources.rs b/backend/windmill-store/src/resources.rs index 8fa04f7780c08..b72c39a17b47f 100644 --- a/backend/windmill-store/src/resources.rs +++ b/backend/windmill-store/src/resources.rs @@ -1030,6 +1030,15 @@ async fn update_resource( ) .execute(&mut *tx) .await?; + + sqlx::query!( + "UPDATE workspace_integrations SET resource_path = $1 WHERE workspace_id = $2 AND resource_path = $3", + npath, + w_id, + path + ) + .execute(&mut *tx) + .await?; } } diff --git a/backend/windmill-store/src/variables.rs b/backend/windmill-store/src/variables.rs index 7d83a974894d5..1a2a2c83a2373 100644 --- a/backend/windmill-store/src/variables.rs +++ b/backend/windmill-store/src/variables.rs @@ -803,6 +803,15 @@ async fn update_variable( ) .execute(&mut *tx) .await?; + + sqlx::query!( + "UPDATE workspace_integrations SET resource_path = $1 WHERE workspace_id = $2 AND resource_path = $3", + npath, + w_id, + path + ) + .execute(&mut *tx) + .await?; } } diff --git a/backend/windmill-test-utils/src/lib.rs 
b/backend/windmill-test-utils/src/lib.rs index 34858fac2e7c4..adbeb83ee7875 100644 --- a/backend/windmill-test-utils/src/lib.rs +++ b/backend/windmill-test-utils/src/lib.rs @@ -8,7 +8,6 @@ use serde_json::json; use sqlx::{postgres::PgListener, Pool, Postgres}; use uuid::Uuid; use windmill_api_client::types::NewScript; -#[cfg(feature = "python")] use windmill_common::flow_status::FlowStatusModule; use windmill_common::{ jobs::{JobKind, JobPayload, RawCode}, @@ -424,7 +423,6 @@ pub async fn listen_for_completed_jobs(db: &Pool) -> impl Stream) -> impl Stream + Unpin { listen_for_uuid_on(db, "queued").await } @@ -486,7 +484,6 @@ pub trait StreamFind: futures::Stream + Unpin + Sized { impl StreamFind for T {} -#[cfg(feature = "python")] pub fn get_module(cjob: &CompletedJob, id: &str) -> Option { cjob.flow_status.clone().and_then(|fs| { use windmill_common::flow_status::FlowStatus; @@ -498,7 +495,6 @@ pub fn get_module(cjob: &CompletedJob, id: &str) -> Option { }) } -#[cfg(feature = "python")] fn find_module_in_vec(modules: Vec, id: &str) -> Option { modules.into_iter().find(|s| s.id() == id) } @@ -836,14 +832,12 @@ pub async fn testing_http_connection(port: u16) -> Connection { let agent_token = format!( "{}{}", windmill_common::agent_workers::AGENT_JWT_PREFIX, - windmill_common::jwt::encode_with_internal_secret( - windmill_api_agent_workers::AgentAuth { - worker_group: "testing-agent".to_owned(), - suffix: Some(suffix.clone()), - tags: vec!["flow".into(), "python3".into(), "dependency".into()], - exp: Some(usize::MAX), - } - ) + windmill_common::jwt::encode_with_internal_secret(windmill_api_agent_workers::AgentAuth { + worker_group: "testing-agent".to_owned(), + suffix: Some(suffix.clone()), + tags: vec!["flow".into(), "python3".into(), "dependency".into()], + exp: Some(usize::MAX), + }) .await .expect("JWT token to be created") ); diff --git a/backend/windmill-types/src/jobs.rs b/backend/windmill-types/src/jobs.rs index 3c54b7956e206..ffd3cc6ef39d0 100644 --- a/backend/windmill-types/src/jobs.rs +++ b/backend/windmill-types/src/jobs.rs @@ -37,6 +37,7 @@ pub enum JobTriggerKind { Schedule, Gcp, Nextcloud, + Google, } impl std::fmt::Display for JobTriggerKind { @@ -54,6 +55,7 @@ impl std::fmt::Display for JobTriggerKind { JobTriggerKind::Schedule => "schedule", JobTriggerKind::Gcp => "gcp", JobTriggerKind::Nextcloud => "nextcloud", + JobTriggerKind::Google => "google", }; write!(f, "{}", kind) } diff --git a/backend/windmill-types/src/triggers.rs b/backend/windmill-types/src/triggers.rs index 64d9a2ce802f1..54c7b52853b19 100644 --- a/backend/windmill-types/src/triggers.rs +++ b/backend/windmill-types/src/triggers.rs @@ -20,6 +20,7 @@ pub enum TriggerKind { Postgres, Gcp, Nextcloud, + Google, } impl TriggerKind { @@ -37,6 +38,7 @@ impl TriggerKind { TriggerKind::Postgres => "postgres".to_string(), TriggerKind::Gcp => "gcp".to_string(), TriggerKind::Nextcloud => "nextcloud".to_string(), + TriggerKind::Google => "google".to_string(), } } } @@ -56,6 +58,7 @@ impl fmt::Display for TriggerKind { TriggerKind::Postgres => "postgres", TriggerKind::Gcp => "gcp", TriggerKind::Nextcloud => "nextcloud", + TriggerKind::Google => "google", }; write!(f, "{}", s) } diff --git a/backend/windmill-worker/src/common.rs b/backend/windmill-worker/src/common.rs index abcb2dbbdd00a..e691ca453b539 100644 --- a/backend/windmill-worker/src/common.rs +++ b/backend/windmill-worker/src/common.rs @@ -542,6 +542,7 @@ pub async fn update_worker_ping_for_failed_init_script( memory_usage: None, wm_memory_usage: None, 
job_isolation: None, + native_mode: None, ping_type: PingType::InitScript, }, ) diff --git a/backend/windmill-worker/src/worker.rs b/backend/windmill-worker/src/worker.rs index 00535dbd178cf..868fcec064c75 100644 --- a/backend/windmill-worker/src/worker.rs +++ b/backend/windmill-worker/src/worker.rs @@ -59,7 +59,7 @@ use std::{ collections::{HashMap, HashSet}, fmt::Display, sync::{ - atomic::{AtomicBool, AtomicU8, AtomicU16, Ordering}, + atomic::{AtomicBool, AtomicU16, AtomicU8, Ordering}, Arc, }, time::Duration, @@ -80,7 +80,7 @@ use windmill_common::{ scripts::{get_full_hub_script_by_path, ScriptHash, ScriptLang}, tracing_init::{QUIET_MODE, VERBOSE_TARGET}, utils::StripPath, - worker::{CLOUD_HOSTED, NO_LOGS, WORKER_CONFIG, WORKER_GROUP}, + worker::{CLOUD_HOSTED, NATIVE_TAGS, NO_LOGS, WORKER_CONFIG, WORKER_GROUP}, DB, IS_READY, }; @@ -2989,6 +2989,17 @@ pub async fn handle_queued_job( _ => {} } + { + let native_mode = WORKER_CONFIG.read().await.native_mode; + if native_mode && !NATIVE_TAGS.contains(&job.tag) { + return Err(Error::ExecutionErr(format!( + "Worker is in native mode and cannot execute non-native job with tag '{}'. Native tags: {}", + job.tag, + NATIVE_TAGS.join(", ") + ))); + } + } + #[cfg(any(not(feature = "enterprise"), feature = "sqlx"))] match conn { Connection::Sql(db) => { diff --git a/backend/windmill-worker/src/worker_utils.rs b/backend/windmill-worker/src/worker_utils.rs index 396b9273abcee..20d7e0a542a7c 100644 --- a/backend/windmill-worker/src/worker_utils.rs +++ b/backend/windmill-worker/src/worker_utils.rs @@ -27,7 +27,10 @@ pub(crate) async fn update_worker_ping_full( occupancy_metrics: &mut OccupancyMetrics, killpill_tx: &KillpillSender, ) { - let tags = WORKER_CONFIG.read().await.worker_tags.clone(); + let wc = WORKER_CONFIG.read().await; + let tags = wc.worker_tags.clone(); + let native_mode = wc.native_mode; + drop(wc); let memory_usage = get_worker_memory_usage(); let wm_memory_usage = get_windmill_memory_usage(); @@ -60,6 +63,7 @@ pub(crate) async fn update_worker_ping_full( occupancy_rate_15s, occupancy_rate_5m, occupancy_rate_30m, + native_mode, ) }) .retry( @@ -105,6 +109,7 @@ async fn update_worker_ping_full_inner( occupancy_rate_15s: Option, occupancy_rate_5m: Option, occupancy_rate_30m: Option, + native_mode: bool, ) -> anyhow::Result<()> { match conn { Connection::Sql(db) => { @@ -120,6 +125,7 @@ async fn update_worker_ping_full_inner( occupancy_rate_15s, occupancy_rate_5m, occupancy_rate_30m, + native_mode, db, ) .await?; @@ -148,6 +154,7 @@ async fn update_worker_ping_full_inner( memory_usage: get_worker_memory_usage(), wm_memory_usage: get_windmill_memory_usage(), job_isolation: None, + native_mode: Some(native_mode), ping_type: PingType::MainLoop, }, ) @@ -163,7 +170,7 @@ pub async fn insert_ping( ip: &str, db: &Connection, ) -> anyhow::Result<()> { - let (tags, dw, dws) = { + let (tags, dw, dws, native_mode) = { let wc = WORKER_CONFIG.read().await.clone(); ( wc.worker_tags, @@ -176,6 +183,7 @@ pub async fn insert_ping( .map(|x| format!("{}:{}", x.workspace_id, x.path)) .collect::>() }), + wc.native_mode, ) }; @@ -204,6 +212,7 @@ pub async fn insert_ping( vcpus, memory, job_isolation, + native_mode, db, ) .await?; @@ -232,6 +241,7 @@ pub async fn insert_ping( memory_usage: get_worker_memory_usage(), wm_memory_usage: get_windmill_memory_usage(), job_isolation, + native_mode: Some(native_mode), ping_type: PingType::Initial, }, ) @@ -305,6 +315,7 @@ pub async fn update_worker_ping_from_job( occupancy_rate_5m: occupancy_rate_5m, occupancy_rate_30m: 
occupancy_rate_30m, job_isolation, + native_mode: None, }, ) .await?; diff --git a/docker-compose.yml b/docker-compose.yml index 7af40e7025185..7152b8bf5522f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -103,6 +103,7 @@ services: - DATABASE_URL=${DATABASE_URL} - MODE=worker - WORKER_GROUP=native + - NATIVE_MODE=true - NUM_WORKERS=8 - SLEEP_QUEUE=200 depends_on: diff --git a/frontend/src/lib/components/AuthSettings.svelte b/frontend/src/lib/components/AuthSettings.svelte index c157808017f4d..d3d4890e9c09b 100644 --- a/frontend/src/lib/components/AuthSettings.svelte +++ b/frontend/src/lib/components/AuthSettings.svelte @@ -15,6 +15,7 @@ import NextcloudSetting from '$lib/components/NextcloudSetting.svelte' import CustomOauth from './CustomOauth.svelte' import { capitalize, type Item } from '$lib/utils' + import ClipboardPanel from './details/ClipboardPanel.svelte' import Toggle from './Toggle.svelte' import DropdownV2 from './DropdownV2.svelte' import { APP_TO_ICON_COMPONENT } from './icons' @@ -461,6 +462,42 @@ /> {/if} + {#if k === 'gworkspace'} +
+ <!-- Toggle for oauths[k].share_with_workspaces; its tag markup was lost in extraction.
+      Recovered change handler (reconstructed framing): -->
+ <!-- on:change={(e) => { if (oauths && oauths[k]) { oauths[k] = { ...oauths[k], share_with_workspaces: e.detail } } }} -->
+ {#if oauths[k]?.share_with_workspaces}
+   <!-- Alert (markup lost): -->
+   Workspace admins will be able to connect Google native triggers without
+   configuring their own OAuth client. The credentials are not exposed to them.
+   <!-- Redirect URI helper with ClipboardPanel (markup lost): -->
+   Add the following redirect URI to Google Cloud Console:
+ {/if}
+ {/if} {/if} diff --git a/frontend/src/lib/components/WorkerGroup.svelte b/frontend/src/lib/components/WorkerGroup.svelte index 820f21599e27e..04ffda72be09e 100644 --- a/frontend/src/lib/components/WorkerGroup.svelte +++ b/frontend/src/lib/components/WorkerGroup.svelte @@ -88,6 +88,7 @@ pip_local_dependencies?: string[] min_alive_workers_alert_threshold?: number autoscaling?: AutoscalingConfig + native_mode?: boolean } = $state({}) function loadNConfig() { @@ -192,6 +193,7 @@ autoscaling?: AutoscalingConfig periodic_script_bash?: string periodic_script_interval_seconds?: number + native_mode?: boolean } activeWorkers: number customTags: string[] | undefined @@ -275,6 +277,15 @@ ? 'dedicated' : 'normal' ) + let isNativeMode = $derived( + config?.native_mode === true || + name === 'native' || + (workers.length > 0 && + workers.some(([_, pings]) => pings.some((p) => p.native_mode === true))) + ) + let nonNativeTags = $derived( + (nconfig?.worker_tags ?? []).filter((t) => !nativeTags.includes(t)) + ) $effect(() => { ;($superadmin || $devopsRole) && listWorkspaces() }) @@ -525,6 +536,37 @@ {/if} {/if} + + {#if nconfig !== undefined} +
+ <!-- markup lost in extraction: worker-group UI for the native_mode setting (nconfig.native_mode above) -->
+ {/if}
{:else if selected == 'dedicated'}
{#if $superadmin || $devopsRole} @@ -1019,6 +1061,9 @@ >{#snippet text()}Number of active workers of this group in the last 15 seconds{/snippet} + {#if isNativeMode} + Native + {/if}
{#if vcpus_memory?.vcpus} diff --git a/frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte b/frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte index 1aa74f99148ea..01485fa5193b3 100644 --- a/frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte +++ b/frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte @@ -14,6 +14,7 @@ import TriggerLabel from '$lib/components/triggers/TriggerLabel.svelte' import CountBadge from '$lib/components/common/badge/CountBadge.svelte' import NextcloudIcon from '$lib/components/icons/NextcloudIcon.svelte' + import GoogleIcon from '$lib/components/icons/GoogleIcon.svelte' const { triggersState, triggersCount } = getContext('TriggerContext') @@ -70,7 +71,8 @@ gcp: { icon: GoogleCloudIcon, countKey: 'gcp_count', disabled: !$enterpriseLicense }, poll: { icon: SchedulePollIcon }, cli: { icon: Terminal }, - nextcloud: { icon: NextcloudIcon, countKey: 'nextcloud_count' } + nextcloud: { icon: NextcloudIcon, countKey: 'nextcloud_count' }, + google: { icon: GoogleIcon, countKey: 'google_count' } } // Add native trigger services that are available @@ -102,7 +104,8 @@ 'email', 'poll', 'cli', - 'nextcloud' + 'nextcloud', + 'google' ]) function camelCaseToWords(s: string) { diff --git a/frontend/src/lib/components/icons/GoogleCalendarIcon.svelte b/frontend/src/lib/components/icons/GoogleCalendarIcon.svelte new file mode 100644 index 0000000000000..65750c7a7d479 --- /dev/null +++ b/frontend/src/lib/components/icons/GoogleCalendarIcon.svelte @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + diff --git a/frontend/src/lib/components/icons/GoogleDriveIcon.svelte b/frontend/src/lib/components/icons/GoogleDriveIcon.svelte new file mode 100644 index 0000000000000..0cccffa372320 --- /dev/null +++ b/frontend/src/lib/components/icons/GoogleDriveIcon.svelte @@ -0,0 +1,35 @@ + + + + + + + + + + diff --git a/frontend/src/lib/components/icons/GoogleIcon.svelte b/frontend/src/lib/components/icons/GoogleIcon.svelte index b9e73ef98012f..bd53eb1bab2d5 100644 --- a/frontend/src/lib/components/icons/GoogleIcon.svelte +++ b/frontend/src/lib/components/icons/GoogleIcon.svelte @@ -1,15 +1,36 @@ - - - - - + + + + + diff --git a/frontend/src/lib/components/triggers.ts b/frontend/src/lib/components/triggers.ts index 7483b0c76bea7..221ddbb3b836a 100644 --- a/frontend/src/lib/components/triggers.ts +++ b/frontend/src/lib/components/triggers.ts @@ -57,6 +57,7 @@ export type TriggerKind = | 'sqs' | 'gcp' | 'nextcloud' + | 'google' export function captureTriggerKindToTriggerKind(kind: CaptureTriggerKind): TriggerKind { switch (kind) { case 'webhook': diff --git a/frontend/src/lib/components/triggers/TriggersEditor.svelte b/frontend/src/lib/components/triggers/TriggersEditor.svelte index 0897725f06c4d..a0e65641ca136 100644 --- a/frontend/src/lib/components/triggers/TriggersEditor.svelte +++ b/frontend/src/lib/components/triggers/TriggersEditor.svelte @@ -243,12 +243,22 @@ ) } else if (triggerType === 'nextcloud') { await triggersState.fetchNativeTriggers( + triggersCount, 'nextcloud', $workspaceStore, currentPath, isFlow, $userStore ) + } else if (triggerType === 'google') { + await triggersState.fetchNativeTriggers( + triggersCount, + 'google', + $workspaceStore, + currentPath, + isFlow, + $userStore + ) } triggersState.selectedTriggerIndex = triggersState.triggers.findIndex( diff --git a/frontend/src/lib/components/triggers/native/NativeTriggerEditor.svelte 
b/frontend/src/lib/components/triggers/native/NativeTriggerEditor.svelte index 8401e3da2cdd2..b7fdb5a546bd2 100644 --- a/frontend/src/lib/components/triggers/native/NativeTriggerEditor.svelte +++ b/frontend/src/lib/components/triggers/native/NativeTriggerEditor.svelte @@ -18,6 +18,7 @@ import Section from '$lib/components/Section.svelte' import Required from '$lib/components/Required.svelte' import NextcloudTriggerForm from './services/nextcloud/NextcloudTriggerForm.svelte' + import GoogleTriggerForm from './services/google/GoogleTriggerForm.svelte' import TriggerEditorToolbar from '$lib/components/triggers/TriggerEditorToolbar.svelte' import { handleConfigChange, type Trigger } from '$lib/components/triggers/utils' import { deepEqual } from 'fast-equals' @@ -71,6 +72,8 @@ switch (service) { case 'nextcloud': return NextcloudTriggerForm + case 'google': + return GoogleTriggerForm default: return null } @@ -231,9 +234,6 @@ let isValid = $derived.by(() => Object.keys(validationErrors).length === 0) let hasChanged = $derived(!deepEqual(getSaveCfg(), originalConfig ?? {})) - $effect(() => { - console.log('loading', getSaveCfg(), originalConfig) - }) let saveDisabled = $derived( loading || !isValid || diff --git a/frontend/src/lib/components/triggers/native/NativeTriggerTable.svelte b/frontend/src/lib/components/triggers/native/NativeTriggerTable.svelte index e6dcbf47d52b7..6314c12e825ec 100644 --- a/frontend/src/lib/components/triggers/native/NativeTriggerTable.svelte +++ b/frontend/src/lib/components/triggers/native/NativeTriggerTable.svelte @@ -13,6 +13,8 @@ import ConfirmationModal from '$lib/components/common/confirmationModal/ConfirmationModal.svelte' import { goto } from '$lib/navigation' import Alert from '$lib/components/common/alert/Alert.svelte' + import GoogleDriveIcon from '$lib/components/icons/GoogleDriveIcon.svelte' + import GoogleCalendarIcon from '$lib/components/icons/GoogleCalendarIcon.svelte' type TriggerW = ExtendedNativeTrigger & { marked?: any } @@ -99,6 +101,20 @@ {trigger.script_path} {/if} + {#if service === 'google'} + {@const triggerType = trigger.service_config?.triggerType} + {@const resourceName = trigger.service_config?.resourceName} + {@const calendarName = trigger.service_config?.calendarName} +
+ <!-- icon tags reconstructed from the imports added above; their original props were lost in extraction -->
+ {#if triggerType === 'calendar'}
+   <GoogleCalendarIcon />
+   Calendar: {calendarName || trigger.service_config?.calendarId || ''}
+ {:else}
+   <GoogleDriveIcon />
+   Drive: {resourceName
+     ? resourceName
+     : trigger.service_config?.resourceId
+       ? trigger.service_config.resourceId
+       : 'All changes'}
+ {/if}
+ {/if}
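+ <!-- google service_config fields read here and in triggers/utils.ts: triggerType
+      ('drive' | 'calendar'), resourceId/resourceName for Drive, calendarId/calendarName
+      for Calendar -->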
external ID: {trigger.external_id}
diff --git a/frontend/src/lib/components/triggers/native/services/google/GoogleCalendarPicker.svelte b/frontend/src/lib/components/triggers/native/services/google/GoogleCalendarPicker.svelte new file mode 100644 index 0000000000000..98c23f8755cad --- /dev/null +++ b/frontend/src/lib/components/triggers/native/services/google/GoogleCalendarPicker.svelte @@ -0,0 +1,128 @@ + + +
+ <!-- NOTE: the <script> block and element markup of this new component were lost in
+      extraction; only the template text and control flow below were recoverable. -->
+ Calendars
+ <!-- controls (markup lost) -->
+ {#if calendarId}
+   Selected: {calendarName || calendarId}
+   <!-- selection controls (markup lost) -->
+ {/if}
+ {#if loading}
+   Loading calendars...
+ {:else if calendars.length === 0}
+   No calendars found. Check that the Google Calendar API is enabled.
+ {:else}
+   {#each calendars as cal (cal.id)}
+     <!-- calendar row (markup lost) -->
+     {#if cal.primary}
+       Primary
+     {/if}
+   {/each}
+ {/if}
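The component's `<script>` block did not survive extraction. A minimal hedged sketch of the state the surviving template binds to; `fetchCalendars` and the `summary` field are assumptions, not the component's actual API:

```ts
// Hedged sketch only: names come from the surviving template above;
// `fetchCalendars` is a hypothetical stand-in for the real backend call.
interface GoogleCalendar {
	id: string
	summary: string
	primary?: boolean
}

let calendars = $state<GoogleCalendar[]>([])
let calendarId = $state('')
let calendarName = $state('')
let loading = $state(false)

async function loadCalendars(fetchCalendars: () => Promise<GoogleCalendar[]>) {
	loading = true
	try {
		calendars = await fetchCalendars()
	} finally {
		loading = false
	}
}

function selectCalendar(cal: GoogleCalendar) {
	calendarId = cal.id
	calendarName = cal.summary
}
```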
diff --git a/frontend/src/lib/components/triggers/native/services/google/GoogleDrivePicker.svelte b/frontend/src/lib/components/triggers/native/services/google/GoogleDrivePicker.svelte new file mode 100644 index 0000000000000..359dfc3536373 --- /dev/null +++ b/frontend/src/lib/components/triggers/native/services/google/GoogleDrivePicker.svelte @@ -0,0 +1,359 @@ + + +
+ <!-- NOTE: the <script> block and element markup of this new component were lost in
+      extraction; only the template text and control flow below were recoverable. -->
+ {#if resourceId}
+   Selected: {resourceName || resourceId}
+   <!-- selection controls (markup lost) -->
+ {:else}
+   No file selected
+ {/if}
+ <!-- tab bar (markup lost); tabs include 'shared_drives' -->
+ {#if activeTab !== 'shared_drives' || currentParentId !== 'root'}
+   <!-- search input (markup lost) -->
+   {#if searchQuery}
+     <!-- clear-search control (markup lost) -->
+   {/if}
+ {/if}
+ {#if !searchQuery}
+   {#each breadcrumbs as crumb, i (crumb.id + '-' + i)}
+     {#if i > 0}
+       <!-- separator (markup lost) -->
+     {/if}
+     {#if i < breadcrumbs.length - 1}
+       <!-- crumb link (markup lost) -->
+     {:else}
+       {crumb.name}
+     {/if}
+   {/each}
+ {/if}
+ {#if loadingFiles}
+   Loading...
+ {:else if activeTab === 'shared_drives' && currentParentId === 'root'}
+   {#if sharedDrives.length === 0}
+     No shared drives found
+   {:else}
+     {#each sharedDrives as drive (drive.id)}
+       <!-- shared drive row (markup lost) -->
+     {/each}
+   {/if}
+ {:else if files.length === 0}
+   {searchQuery ? 'No files match your search' : 'This folder is empty'}
+ {:else}
+   {#each files as file (file.id)}
+     {#if file.is_folder}
+       <!-- folder row (markup lost) -->
+     {:else}
+       {#if file.id === resourceId}
+         <!-- selected-file row (markup lost) -->
+       {:else}
+         <!-- file row (markup lost) -->
+       {/if}
+     {/if}
+   {/each}
+   {#if nextPageToken}
+     <!-- load-more control (markup lost) -->
+   {/if}
+ {/if}
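As above, the component's `<script>` block was lost. A hedged sketch of the navigation state the surviving template references; tab values other than `'shared_drives'` and `DriveFile` fields beyond `id`/`is_folder` are assumptions:

```ts
// Hedged sketch only: names come from the surviving template; the tab union
// and the folder/file fetch plumbing are assumptions.
interface DriveFile {
	id: string
	name: string
	is_folder: boolean
}

let activeTab = $state<'my_drive' | 'shared_with_me' | 'shared_drives'>('my_drive')
let currentParentId = $state('root')
let breadcrumbs = $state<{ id: string; name: string }[]>([{ id: 'root', name: 'My Drive' }])
let files = $state<DriveFile[]>([])
let sharedDrives = $state<{ id: string; name: string }[]>([])
let searchQuery = $state('')
let nextPageToken = $state<string | undefined>(undefined)
let loadingFiles = $state(false)

function openFolder(folder: DriveFile) {
	// descend into a folder and extend the breadcrumb trail
	currentParentId = folder.id
	breadcrumbs = [...breadcrumbs, { id: folder.id, name: folder.name }]
	nextPageToken = undefined
}

function jumpToCrumb(i: number) {
	// truncate the trail back to the clicked crumb
	breadcrumbs = breadcrumbs.slice(0, i + 1)
	currentParentId = breadcrumbs[i].id
}
```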
diff --git a/frontend/src/lib/components/triggers/native/services/google/GoogleTriggerForm.svelte b/frontend/src/lib/components/triggers/native/services/google/GoogleTriggerForm.svelte new file mode 100644 index 0000000000000..e0b18945b48ef --- /dev/null +++ b/frontend/src/lib/components/triggers/native/services/google/GoogleTriggerForm.svelte @@ -0,0 +1,175 @@ + + +
+ <!-- NOTE: the <script> block and element markup of this new component were lost in
+      extraction; only the template text and control flow below were recoverable. -->
+ {#if loading}
+   Loading configuration...
+ {:else}
+   Trigger Type
+   <!-- trigger-type selector (markup lost); its {#snippet children({ item })} body was also lost -->
+   {#if triggerType === 'drive'}
+     Watch Mode
+     <!-- watch-mode selector (markup lost) -->
+     {#if driveWatchMode === 'file'}
+       File
+       <!-- file picker, presumably GoogleDrivePicker (markup lost) -->
+       {#if errors.resourceId}
+         {errors.resourceId}
+       {/if}
+     {:else}
+       Watches all changes across your Google Drive. The trigger will fire whenever
+       any file is created, modified, or deleted.
+     {/if}
+   {:else if triggerType === 'calendar'}
+     Calendar
+     <!-- calendar picker, presumably GoogleCalendarPicker (markup lost) -->
+     {#if errors.calendarId}
+       {errors.calendarId}
+     {/if}
+     Requires the Google Calendar API to be enabled in your Google Cloud project.
+   {/if}
+ {/if}
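The form's lost `<script>` block evidently validates the same fields that `getLightConfig` reads in `triggers/utils.ts`. A hedged sketch of that config shape and validation; `'all_changes'` as the non-file watch mode and the error messages are assumptions:

```ts
// Hedged sketch only: field names match service_config usage elsewhere in this
// diff; the watch-mode value 'all_changes' and the messages are assumptions.
type GoogleTriggerConfig = {
	triggerType: 'drive' | 'calendar'
	resourceId?: string
	resourceName?: string
	calendarId?: string
	calendarName?: string
}

function validate(
	cfg: GoogleTriggerConfig,
	driveWatchMode: 'file' | 'all_changes'
): Record<string, string> {
	const errors: Record<string, string> = {}
	if (cfg.triggerType === 'drive' && driveWatchMode === 'file' && !cfg.resourceId) {
		errors.resourceId = 'Select a file to watch'
	}
	if (cfg.triggerType === 'calendar' && !cfg.calendarId) {
		errors.calendarId = 'Select a calendar'
	}
	return errors
}
```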
diff --git a/frontend/src/lib/components/triggers/native/utils.ts b/frontend/src/lib/components/triggers/native/utils.ts
index ab78f490e6e6f..c442013ba27af 100644
--- a/frontend/src/lib/components/triggers/native/utils.ts
+++ b/frontend/src/lib/components/triggers/native/utils.ts
@@ -27,6 +27,14 @@ export const NATIVE_TRIGGER_SERVICES: Record
+	// (added 'google' registry entry not recovered in extraction; see the sketch at the end of this section)
 {
 	switch (service) {
 		case 'nextcloud':
 			return (await import('$lib/components/icons/NextcloudIcon.svelte')).default
+		case 'google':
+			return (await import('$lib/components/icons/GoogleIcon.svelte')).default
 	}
 }

diff --git a/frontend/src/lib/components/triggers/triggers.svelte.ts b/frontend/src/lib/components/triggers/triggers.svelte.ts
index 3e56efff33c7d..72c4a1c5f2e3b 100644
--- a/frontend/src/lib/components/triggers/triggers.svelte.ts
+++ b/frontend/src/lib/components/triggers/triggers.svelte.ts
@@ -465,6 +465,7 @@
 	}

 	async fetchNativeTriggers(
+		triggersCountStore: Writable<TriggersCount | undefined>,
 		serviceName: NativeServiceName,
 		workspaceId: string | undefined,
 		path: string,
@@ -487,7 +488,14 @@
 				extra_perms: {},
 				service_config: t.service_config
 			}))
-			this.updateTriggers(triggerData, serviceName, user)
+			const count = this.updateTriggers(triggerData, serviceName, user)
+			const countProperty = `${serviceName}_count`
+			triggersCountStore.update((triggersCount) => {
+				return {
+					...(triggersCount ?? {}),
+					[countProperty]: count
+				}
+			})
 		} catch (error) {
 			console.debug(`Failed to fetch ${serviceName} triggers:`, error)
 		}
@@ -510,7 +518,8 @@
 			this.fetchWebsocketTriggers(triggersCountStore, workspaceId, path, isFlow, user),
 			this.fetchPostgresTriggers(triggersCountStore, workspaceId, path, isFlow, user),
 			this.fetchMqttTriggers(triggersCountStore, workspaceId, path, isFlow, user),
-			this.fetchNativeTriggers('nextcloud', workspaceId, path, isFlow, user),
+			this.fetchNativeTriggers(triggersCountStore, 'nextcloud', workspaceId, path, isFlow, user),
+			this.fetchNativeTriggers(triggersCountStore, 'google', workspaceId, path, isFlow, user),
 			...(get(enterpriseLicense)
 				? [
 						this.fetchKafkaTriggers(triggersCountStore, workspaceId, path, isFlow, user),

diff --git a/frontend/src/lib/components/triggers/utils.ts b/frontend/src/lib/components/triggers/utils.ts
index 0256824205041..c188220e254a0 100644
--- a/frontend/src/lib/components/triggers/utils.ts
+++ b/frontend/src/lib/components/triggers/utils.ts
@@ -28,6 +28,7 @@ import type { Triggers } from './triggers.svelte'
 import { emptyString } from '$lib/utils'
 import { saveEmailTriggerFromCfg } from './email/utils'
 import NextcloudIcon from '$lib/components/icons/NextcloudIcon.svelte'
+import GoogleIcon from '$lib/components/icons/GoogleIcon.svelte'
 import { saveNativeTriggerFromCfg } from './native/utils'

 export const CLOUD_DISABLED_TRIGGER_TYPES = [
@@ -57,6 +58,7 @@ export type TriggerType =
 	| 'poll'
 	| 'cli'
 	| 'nextcloud'
+	| 'google'

 export const jobTriggerKinds: JobTriggerKind[] = [
 	'webhook',
@@ -70,7 +72,8 @@
 	'sqs',
 	'postgres',
 	'schedule',
-	'gcp'
+	'gcp',
+	'google'
 ]

 export type Trigger = {
@@ -103,7 +106,8 @@ export const triggerIconMap = {
 	primary_schedule: Calendar,
 	poll: SchedulePollIcon,
 	cli: Terminal,
-	nextcloud: NextcloudIcon
+	nextcloud: NextcloudIcon,
+	google: GoogleIcon
 }

 export const triggerDisplayNamesMap = {
@@ -121,7 +125,8 @@
 	webhook: 'Webhook',
 	default_email: 'Default Email',
 	cli: 'CLI',
-	nextcloud: 'Nextcloud'
+	nextcloud: 'Nextcloud',
+	google: 'Google'
 } as const satisfies Record<TriggerType, string>

 /**
@@ -176,7 +181,8 @@ export function updateTriggersCount(
 	email: 'email_count',
 	poll: undefined,
 	cli: undefined,
-	nextcloud: undefined
+	nextcloud: 'nextcloud_count',
+	google: 'google_count'
 }

 const countProperty = countPropertyMap[type]
@@ -367,6 +373,15 @@ export async function deployTriggers(
 			!trigger.isDraft,
 			workspaceId,
 			usedTriggerKinds
+		),
+	google: (trigger: Trigger) =>
+		saveNativeTriggerFromCfg(
+			'google',
+			trigger.path ?? '',
+			trigger.draftConfig ?? {},
+			!trigger.isDraft,
+			workspaceId,
+			usedTriggerKinds
+		)
 }
@@ -467,6 +482,14 @@ export function getLightConfig(
 		return { local_part: trigger.local_part }
 	} else if (triggerType === 'nextcloud') {
 		return { event: trigger.service_config?.event ?? trigger.event }
+	} else if (triggerType === 'google') {
+		return {
+			trigger_type: trigger.service_config?.triggerType ?? trigger.trigger_type,
+			resource_id: trigger.service_config?.resourceId ?? trigger.resource_id,
+			resource_name: trigger.service_config?.resourceName ?? trigger.resource_name,
+			calendar_id: trigger.service_config?.calendarId ?? trigger.calendar_id,
+			calendar_name: trigger.service_config?.calendarName ?? trigger.calendar_name
+		}
 	} else {
 		return undefined
 	}
@@ -503,6 +526,15 @@ export function getTriggerLabel(trigger: Trigger): string {
 		return `${config?.local_part}`
 	} else if (type === 'nextcloud' && path) {
 		return `${path}`
+	} else if (type === 'google' && path) {
+		const triggerType = config?.trigger_type ?? config?.triggerType
+		if (triggerType === 'calendar') {
+			const name = config?.calendar_name ?? config?.calendarName ?? config?.calendar_id ?? ''
+			return `Calendar: ${name || path}`
+		} else {
+			const name = config?.resource_name ?? config?.resourceName ?? ''
+			return name ? `Drive: ${name}` : config?.resource_id ? `Drive: ${path}` : `Drive: All changes`
+		}
 	} else if (isDraft && draftConfig?.path) {
 		return `${draftConfig?.path}`
 	} else if (isDraft) {
@@ -528,7 +560,8 @@
 		'sqs',
 		'gcp',
 		'email',
-		'nextcloud'
+		'nextcloud',
+		'google'
 	]

 	return triggers.sort((a, b) => {

diff --git a/frontend/src/lib/components/workspaceSettings/OAuthClientConfig.svelte b/frontend/src/lib/components/workspaceSettings/OAuthClientConfig.svelte
index 471b3c0de9c5c..1a6f15345f265 100644
--- a/frontend/src/lib/components/workspaceSettings/OAuthClientConfig.svelte
+++ b/frontend/src/lib/components/workspaceSettings/OAuthClientConfig.svelte
@@ -2,8 +2,8 @@
 	import { sendUserToast } from '$lib/utils'
 	import { Button, Alert } from '$lib/components/common'
 	import Label from '$lib/components/Label.svelte'
+	import ClipboardPanel from '$lib/components/details/ClipboardPanel.svelte'
 	import { Save, ExternalLink } from 'lucide-svelte'
-	import { workspaceStore } from '$lib/stores'

 	interface Props {
 		serviceName: string
@@ -15,6 +15,13 @@
 		} | null
 		redirectUri: string
 		onConfigSaved: (oauthData: any) => Promise<void>
+		requiresBaseUrl?: boolean
+		baseUrlLabel?: string
+		baseUrlPlaceholder?: string
+		baseUrlSettingsPath?: string
+		clientIdPlaceholder?: string
+		clientSecretPlaceholder?: string
+		setupInstructions?: string[]
 	}

 	let {
@@ -22,15 +29,20 @@
 		serviceDisplayName,
 		existingConfig,
 		onConfigSaved,
-		redirectUri = $bindable('')
+		redirectUri,
+		requiresBaseUrl = true,
+		baseUrlLabel = `${serviceDisplayName} base URL`,
+		baseUrlPlaceholder = `https://your-${serviceName}.example.com`,
+		baseUrlSettingsPath = '/settings/admin/security',
+		clientIdPlaceholder = `Generated by ${serviceDisplayName} OAuth app`,
+		clientSecretPlaceholder = `Generated by ${serviceDisplayName} OAuth app`,
+		setupInstructions
 	}: Props = $props()

 	let saving = $state(false)
 	let clientId = $state('')
 	let clientSecret = $state('')
 	let baseUrl = $state('')
-	let workspace = $workspaceStore!
-	redirectUri = `${window.location.origin}/workspace_settings?tab=native_triggers&service=${serviceName}&workspace=${workspace}`

 	$effect(() => {
 		if (existingConfig) {
 			clientId = existingConfig.client_id
@@ -39,18 +51,25 @@
 		}
 	})

+	let canSave = $derived(
+		!saving &&
+			!!clientId.trim() &&
+			!!clientSecret.trim() &&
+			(!requiresBaseUrl || !!baseUrl.trim())
+	)
+
 	async function saveConfig() {
-		if (!clientId.trim() || !clientSecret.trim() || !baseUrl.trim()) {
-			sendUserToast('Please fill in all fields', true)
+		if (!canSave) {
+			sendUserToast('Please fill in all required fields', true)
 			return
 		}

 		saving = true
 		try {
-			const oauthData = {
+			const oauthData: Record<string, string> = {
 				client_id: clientId.trim(),
 				client_secret: clientSecret.trim(),
-				base_url: baseUrl.trim().replace(/\/+$/, ''),
+				base_url: requiresBaseUrl ? baseUrl.trim().replace(/\/+$/, '') : '',
 				redirect_uri: redirectUri.trim()
 			}
@@ -68,16 +87,7 @@
 		clientSecret = ''
 	}

-	function copyToClipboard(text: string, label: string) {
-		navigator.clipboard
-			.writeText(text)
-			.then(() => {
-				sendUserToast(`${label} copied to clipboard`)
-			})
-			.catch(() => {
-				sendUserToast(`Failed to copy ${label}`, true)
-			})
-	}
@@ -89,73 +99,52 @@
 	{#if !existingConfig}
-			Before you can connect to {serviceDisplayName}, you need to configure an OAuth client. This
-			requires:
+			Before you can connect to {serviceDisplayName}, you need to configure an OAuth client.
-			1. Create an OAuth2 application in your {serviceDisplayName} instance (Administration settings -> Security -> OAuth 2.0 clients)
-			2. Configure the redirect URI: {redirectUri || `${window.location.origin}/workspace_settings?tab=integrations&service=${serviceName}`}
-			3. Enter the client credentials below
+		{#if setupInstructions}
+			{#each setupInstructions as instruction}
+				{@html instruction}
+			{/each}
+		{/if}
 	{/if}
-	<!-- removed markup not recovered -->
+	{#if baseUrl && baseUrlSettingsPath}
+		<!-- link to the instance OAuth settings page (markup not recovered) -->
+	{/if}
+	{/if}
@@ -163,7 +152,7 @@
@@ -174,7 +163,7 @@
@@ -184,7 +173,7 @@
+	<!-- input placeholder changes in these hunks not recovered -->
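The generalized props above let a fixed-host service like Google skip the base-URL field entirely. A hedged usage sketch follows; every value below is illustrative and not taken from this PR:

```ts
// Illustrative props for the generalized OAuthClientConfig; values are assumptions.
const googleOAuthProps = {
	serviceName: 'google',
	serviceDisplayName: 'Google',
	existingConfig: null,
	// the parent computes and passes the redirect URI (see the sketch further below)
	redirectUri: `${window.location.origin}/workspace_settings?tab=native_triggers&service=google`,
	requiresBaseUrl: false, // Google's OAuth endpoints are fixed; no instance base URL needed
	clientIdPlaceholder: 'xxxxxxxx.apps.googleusercontent.com',
	setupInstructions: [
		'Create an OAuth client in the Google Cloud Console (APIs & Services -> Credentials)',
		'Add the redirect URI shown above to the authorized redirect URIs',
		'Enter the client credentials below'
	],
	onConfigSaved: async (_oauthData: any) => {
		// persist via the workspace integrations endpoint
	}
}
```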
diff --git a/frontend/src/lib/components/workspaceSettings/WorkspaceIntegrations.svelte b/frontend/src/lib/components/workspaceSettings/WorkspaceIntegrations.svelte
 	{:else if loading}
 		{#each new Array(3) as _}
@@ -270,7 +388,7 @@
 						Connected
+				{:else if instanceSharingAvailable[serviceName]}
+				{:else}
+			{#if instanceSharingAvailable[serviceName] && !isOAuthConfigured}
+				<!-- markup not recovered -->
+				Your instance admin has configured Google OAuth for native triggers. Before
+				connecting, ensure the following redirect URI has been added to the
+				Google Cloud Console by the instance admin:
+				<!-- redirect URI display (markup not recovered) -->
+			{/if}
+			{#if isShowingConfig}
+				{#if serviceName === 'nextcloud'}
+					Nextcloud integration requires:
+					<!-- setup list (markup not recovered) -->
+				{/if}
+				<!-- OAuthClientConfig instance (markup not recovered); its save callback:
+				     { await createOrUpdateIntegration(serviceName, oauthData); showingConfig = null } -->
@@ -343,3 +521,5 @@
 {/if}
 {/if}
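Since `redirectUri` is now a plain required prop, the parent component has to build it itself. A sketch mirroring the URL shape of the removed in-component assignment; the helper name `buildRedirectUri` is hypothetical:

```ts
import { get } from 'svelte/store'
import { workspaceStore } from '$lib/stores'

// Mirrors the URL the old in-component code built; only the URL shape is from the PR.
function buildRedirectUri(serviceName: string): string {
	const workspace = get(workspaceStore) ?? ''
	return `${window.location.origin}/workspace_settings?tab=native_triggers&service=${serviceName}&workspace=${workspace}`
}
```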
diff --git a/frontend/src/routes/(root)/(logged)/+layout.svelte b/frontend/src/routes/(root)/(logged)/+layout.svelte
index 492fe05aff0b6..590ff6db215e5 100644
--- a/frontend/src/routes/(root)/(logged)/+layout.svelte
+++ b/frontend/src/routes/(root)/(logged)/+layout.svelte
@@ -233,7 +233,8 @@
 			mqtt_used,
 			gcp_used,
 			email_used,
-			nextcloud_used
+			nextcloud_used,
+			google_used
 		} = await WorkspaceService.getUsedTriggers({
 			workspace: $workspaceStore ?? ''
 		})
@@ -267,6 +268,9 @@
 		if (nextcloud_used) {
 			usedKinds.push('nextcloud')
 		}
+		if (google_used) {
+			usedKinds.push('google')
+		}
 		$usedTriggerKinds = usedKinds
 	}

diff --git a/frontend/src/routes/(root)/(logged)/native_triggers/[service_name]/+page.svelte b/frontend/src/routes/(root)/(logged)/native_triggers/[service_name]/+page.svelte
index e7c42baa89803..fce11df6b9a8c 100644
--- a/frontend/src/routes/(root)/(logged)/native_triggers/[service_name]/+page.svelte
+++ b/frontend/src/routes/(root)/(logged)/native_triggers/[service_name]/+page.svelte
@@ -163,10 +163,18 @@
 		}
 	}

-	checkServiceAvailability().then(() => {
-		if (serviceAvailable) {
-			loadTriggers()
-		}
+	$effect(() => {
+		// Re-run when serviceName or workspace changes (e.g. navigating between /native_triggers/nextcloud and /native_triggers/google)
+		void serviceName
+		void $workspaceStore
+		triggers = []
+		serviceAvailable = undefined
+		loading = true
+		checkServiceAvailability().then(() => {
+			if (serviceAvailable) {
+				loadTriggers()
+			}
+		})
 	})
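The `@@ -27,6 +27,14 @@` hunk in `native/utils.ts` above adds eight lines to `NATIVE_TRIGGER_SERVICES`, but the entry itself was lost in extraction. A hypothetical shape for such an entry, modeled on the lazy icon import that did survive; every field name here is an assumption:

```ts
// Hypothetical registry entry; the actual lines added by the hunk were not recovered.
const googleEntry = {
	displayName: 'Google',
	// lazily import the config form, mirroring the icon loader shown in the hunk
	configComponent: async () =>
		(await import('./services/google/GoogleTriggerForm.svelte')).default
}
```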