diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index a8930196..c93585b9 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -102,7 +102,7 @@ jobs:
- name: Get dependencies
run: go mod download
- name: TF acceptance tests
- timeout-minutes: 10
+ timeout-minutes: 15
env:
TF_ACC: "true"
VERCEL_API_TOKEN: ${{ secrets.VERCEL_API_TOKEN }}
@@ -116,7 +116,7 @@ jobs:
VERCEL_TERRAFORM_TESTING_CERTIFICATE: ${{ secrets.VERCEL_TERRAFORM_TESTING_CERTIFICATE }}
VERCEL_TERRAFORM_TESTING_CERTIFICATE_KEY: ${{ secrets.VERCEL_TERRAFORM_TESTING_CERTIFICATE_KEY }}
run: |
- go test ./...
+ go test -v ./...
summary:
name: Summary
@@ -130,11 +130,11 @@ jobs:
steps:
- name: Success
run: |-
- for status in ${{ join(needs.*.result, ' ') }}
- do
- if [ "$status" != "success" ] && [ "$status" != "skipped" ]
- then
- echo "Some checks failed"
- exit 1
- fi
- done
+ for status in ${{ join(needs.*.result, ' ') }}
+ do
+ if [ "$status" != "success" ] && [ "$status" != "skipped" ]
+ then
+ echo "Some checks failed"
+ exit 1
+ fi
+ done
diff --git a/client/drain.go b/client/drain.go
new file mode 100644
index 00000000..709dc356
--- /dev/null
+++ b/client/drain.go
@@ -0,0 +1,165 @@
+package client
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
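+// Drain represents a configurable drain as returned by the Vercel API.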
+type Drain struct {
+ ID string `json:"id"`
+ OwnerID string `json:"ownerId"`
+ Name string `json:"name"`
+ Projects string `json:"projects"` // "some" or "all"
+ ProjectIds []string `json:"projectIds"`
+ Schemas map[string]any `json:"schemas"`
+ Delivery DeliveryConfig `json:"delivery"`
+ Sampling []SamplingConfig `json:"sampling,omitempty"`
+ TeamID string `json:"teamId"`
+ Status string `json:"status"`
+ Filter *string `json:"filter,omitempty"`
+ Transforms []TransformConfig `json:"transforms,omitempty"`
+}
+
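+// OTLPDeliveryEndpoint is the endpoint object used by OTLP deliveries.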
+type OTLPDeliveryEndpoint struct {
+ Traces string `json:"traces"`
+}
+
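+// DeliveryConfig describes how a drain delivers collected data to an external endpoint.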
+type DeliveryConfig struct {
+ Type string `json:"type"`
+ Endpoint any `json:"endpoint"` // Can be string or object for different delivery types
+ Encoding string `json:"encoding"`
+ Compression *string `json:"compression,omitempty"`
+ Headers map[string]string `json:"headers"`
+ Secret *string `json:"secret,omitempty"`
+}
+
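+// SamplingConfig describes a sampling rule applied to the data a drain collects.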
+type SamplingConfig struct {
+ Type string `json:"type"`
+ Rate float64 `json:"rate"` // Must be between 0 and 1
+ Env *string `json:"env,omitempty"`
+ RequestPath *string `json:"requestPath,omitempty"`
+}
+
+type TransformConfig struct {
+ ID string `json:"id"`
+}
+
+type SchemaConfig struct {
+ Version string `json:"version"`
+}
+
+type CreateDrainRequest struct {
+ TeamID string `json:"-"`
+ Name string `json:"name"`
+ Projects string `json:"projects"` // "some" or "all"
+ ProjectIds []string `json:"projectIds,omitempty"`
+ Filter *string `json:"filter,omitempty"`
+ Schemas map[string]SchemaConfig `json:"schemas"`
+ Delivery DeliveryConfig `json:"delivery"`
+ Sampling []SamplingConfig `json:"sampling,omitempty"`
+ Transforms []TransformConfig `json:"transforms,omitempty"`
+}
+
+type UpdateDrainRequest struct {
+ TeamID string `json:"-"`
+ Name *string `json:"name,omitempty"`
+ Projects *string `json:"projects,omitempty"`
+ ProjectIds []string `json:"projectIds,omitempty"`
+ Filter *string `json:"filter,omitempty"`
+ Schemas map[string]SchemaConfig `json:"schemas,omitempty"`
+ Delivery *DeliveryConfig `json:"delivery,omitempty"`
+ Sampling []SamplingConfig `json:"sampling,omitempty"`
+ Transforms []TransformConfig `json:"transforms,omitempty"`
+ Status *string `json:"status,omitempty"` // "enabled" or "disabled"
+}
+
+type ListDrainsResponse struct {
+ Drains []Drain `json:"drains"`
+}
+
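+// CreateDrain creates a drain within Vercel.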
+func (c *Client) CreateDrain(ctx context.Context, request CreateDrainRequest) (d Drain, err error) {
+ url := fmt.Sprintf("%s/v1/drains", c.baseURL)
+ if c.TeamID(request.TeamID) != "" {
+ url = fmt.Sprintf("%s?teamId=%s", url, c.TeamID(request.TeamID))
+ }
+ payload := string(mustMarshal(request))
+ tflog.Info(ctx, "creating drain", map[string]any{
+ "url": url,
+ "payload": payload,
+ })
+ err = c.doRequest(clientRequest{
+ ctx: ctx,
+ method: "POST",
+ url: url,
+ body: payload,
+ }, &d)
+ return d, err
+}
+
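+// GetDrain retrieves information about an existing drain from Vercel.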
+func (c *Client) GetDrain(ctx context.Context, id, teamID string) (d Drain, err error) {
+ url := fmt.Sprintf("%s/v1/drains/%s", c.baseURL, id)
+ if c.TeamID(teamID) != "" {
+ url = fmt.Sprintf("%s?teamId=%s", url, c.TeamID(teamID))
+ }
+ tflog.Info(ctx, "reading drain", map[string]any{
+ "url": url,
+ })
+ err = c.doRequest(clientRequest{
+ ctx: ctx,
+ method: "GET",
+ url: url,
+ }, &d)
+ return d, err
+}
+
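+// UpdateDrain updates an existing drain within Vercel.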
+func (c *Client) UpdateDrain(ctx context.Context, id string, request UpdateDrainRequest) (d Drain, err error) {
+ url := fmt.Sprintf("%s/v1/drains/%s", c.baseURL, id)
+ if c.TeamID(request.TeamID) != "" {
+ url = fmt.Sprintf("%s?teamId=%s", url, c.TeamID(request.TeamID))
+ }
+ payload := string(mustMarshal(request))
+ tflog.Info(ctx, "updating drain", map[string]any{
+ "url": url,
+ "payload": payload,
+ })
+ err = c.doRequest(clientRequest{
+ ctx: ctx,
+ method: "PATCH",
+ url: url,
+ body: payload,
+ }, &d)
+ return d, err
+}
+
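+// DeleteDrain deletes a drain within Vercel.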
+func (c *Client) DeleteDrain(ctx context.Context, id, teamID string) error {
+ url := fmt.Sprintf("%s/v1/drains/%s", c.baseURL, id)
+ if c.TeamID(teamID) != "" {
+ url = fmt.Sprintf("%s?teamId=%s", url, c.TeamID(teamID))
+ }
+ tflog.Info(ctx, "deleting drain", map[string]any{
+ "url": url,
+ })
+ return c.doRequest(clientRequest{
+ ctx: ctx,
+ method: "DELETE",
+ url: url,
+ }, nil)
+}
+
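+// ListDrains lists the drains for a team or personal account.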
+func (c *Client) ListDrains(ctx context.Context, teamID string) (response ListDrainsResponse, err error) {
+ url := fmt.Sprintf("%s/v1/drains", c.baseURL)
+ if c.TeamID(teamID) != "" {
+ url = fmt.Sprintf("%s?teamId=%s", url, c.TeamID(teamID))
+ }
+ tflog.Info(ctx, "listing drains", map[string]any{
+ "url": url,
+ })
+ err = c.doRequest(clientRequest{
+ ctx: ctx,
+ method: "GET",
+ url: url,
+ }, &response)
+ return response, err
+}
diff --git a/docs/data-sources/drain.md b/docs/data-sources/drain.md
new file mode 100644
index 00000000..6c3c70ea
--- /dev/null
+++ b/docs/data-sources/drain.md
@@ -0,0 +1,97 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "vercel_drain Data Source - terraform-provider-vercel"
+subcategory: ""
+description: |-
+ Provides information about an existing Drain.
+ Drains collect various types of data including logs, traces, analytics, and speed insights from your Vercel projects.
+ This is a more generic version of log drains that supports multiple data types and delivery methods.
+ Teams on Pro and Enterprise plans can create configurable drains from the Vercel dashboard.
+---
+
+# vercel_drain (Data Source)
+
+Provides information about an existing Drain.
+
+Drains collect various types of data including logs, traces, analytics, and speed insights from your Vercel projects.
+This is a more generic version of log drains that supports multiple data types and delivery methods.
+
+Teams on Pro and Enterprise plans can create configurable drains from the Vercel dashboard.
+
+## Example Usage
+
+```terraform
+data "vercel_drain" "example" {
+ id = "drn_xxxxxxxxxxxxxxxxxxxxxxxx"
+}
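+
+# The drain's attributes can then be referenced elsewhere in the
+# configuration, e.g. a hypothetical output exposing the drain's status.
+output "drain_status" {
+ value = data.vercel_drain.example.status
+}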
+```
+
+
+## Schema
+
+### Required
+
+- `id` (String) The ID of the Drain.
+
+### Optional
+
+- `team_id` (String) The ID of the team the Drain should exist under. Required when configuring a team resource if a default team has not been set in the provider.
+
+### Read-Only
+
+- `delivery` (Attributes) Configuration for how data should be delivered. (see [below for nested schema](#nestedatt--delivery))
+- `filter` (String) A filter expression applied to incoming data.
+- `name` (String) The name of the Drain.
+- `project_ids` (Set of String) A list of project IDs that the drain should be associated with. Only valid when `projects` is set to `some`.
+- `projects` (String) Whether to include all projects or a specific set. Valid values are `all` or `some`.
+- `sampling` (Attributes Set) Sampling configuration for the drain. (see [below for nested schema](#nestedatt--sampling))
+- `schemas` (Map of Object) A map of schema configurations. Keys can be `log`, `trace`, `analytics`, or `speed_insights`. (see [below for nested schema](#nestedatt--schemas))
+- `status` (String) The status of the drain.
+- `transforms` (Attributes Set) Transform configurations for the drain. (see [below for nested schema](#nestedatt--transforms))
+
+
+### Nested Schema for `delivery`
+
+Read-Only:
+
+- `compression` (String) The compression method. Valid values are `gzip` or `none`. Only applicable for HTTP delivery.
+- `encoding` (String) The encoding format. Valid values are `json`, `ndjson` (for HTTP), or `proto` (for OTLP).
+- `endpoint` (Attributes) Endpoint configuration. Contains `url` for HTTP or `traces` for OTLP. (see [below for nested schema](#nestedatt--delivery--endpoint))
+- `headers` (Map of String) Custom headers to include in HTTP requests.
+- `type` (String) The delivery type. Valid values are `http` or `otlphttp`.
+
+
+### Nested Schema for `delivery.endpoint`
+
+Read-Only:
+
+- `traces` (String) The traces endpoint URL for OTLP delivery type.
+- `url` (String) The endpoint URL for HTTP delivery type.
+
+
+
+
+### Nested Schema for `sampling`
+
+Read-Only:
+
+- `environment` (String) The environment to apply sampling to. Valid values are `production` or `preview`.
+- `rate` (Number) The sampling rate from 0 to 1 (e.g., 0.1 for 10%).
+- `request_path` (String) Request path prefix to apply the sampling rule to.
+- `type` (String) The sampling type. Only `head_sampling` is supported.
+
+
+
+### Nested Schema for `schemas`
+
+Read-Only:
+
+- `version` (String)
+
+
+
+### Nested Schema for `transforms`
+
+Read-Only:
+
+- `id` (String) The transform ID.
diff --git a/docs/resources/drain.md b/docs/resources/drain.md
new file mode 100644
index 00000000..2f787b1c
--- /dev/null
+++ b/docs/resources/drain.md
@@ -0,0 +1,234 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "vercel_drain Resource - terraform-provider-vercel"
+subcategory: ""
+description: |-
+ Provides a Configurable Drain resource.
+ Drains collect various types of data including logs, traces, analytics, and speed insights from your Vercel projects.
+ This is a more generic version of log drains that supports multiple data types and delivery methods.
+ Teams on Pro and Enterprise plans can create configurable drains from the Vercel dashboard.
+ ~> Only Pro and Enterprise teams can create Configurable Drains.
+---
+
+# vercel_drain (Resource)
+
+Provides a Configurable Drain resource.
+
+Drains collect various types of data including logs, traces, analytics, and speed insights from your Vercel projects.
+This is a more generic version of log drains that supports multiple data types and delivery methods.
+
+Teams on Pro and Enterprise plans can create configurable drains from the Vercel dashboard.
+
+~> Only Pro and Enterprise teams can create Configurable Drains.
+
+## Example Usage
+
+```terraform
+resource "vercel_project" "example" {
+ name = "example-project"
+}
+
+# Basic HTTP drain for logs
+resource "vercel_drain" "basic_http" {
+ name = "basic-http-logs"
+ projects = "all"
+
+ schemas = {
+ log = {
+ version = "v1"
+ }
+ }
+
+ delivery = {
+ type = "http"
+ endpoint = {
+ url = "https://example.com/webhook"
+ }
+ encoding = "json"
+ headers = {
+ "Authorization" = "Bearer your-token"
+ }
+ }
+}
+
+# Advanced drain with multiple schemas and sampling
+resource "vercel_drain" "advanced" {
+ name = "advanced-multi-schema"
+ projects = "some"
+ project_ids = [vercel_project.example.id]
+ filter = "level >= 'info'"
+
+ schemas = {
+ log = {
+ version = "v1"
+ }
+ trace = {
+ version = "v1"
+ }
+ analytics = {
+ version = "v1"
+ }
+ speed_insights = {
+ version = "v1"
+ }
+ }
+
+ delivery = {
+ type = "http"
+ endpoint = {
+ url = "https://example.com/advanced-drain"
+ }
+ encoding = "ndjson"
+ compression = "gzip"
+ headers = {
+ "Authorization" = "Bearer advanced-token"
+ "Content-Type" = "application/x-ndjson"
+ "X-Custom" = "custom-header"
+ }
+ secret = "your-signing-secret-for-verification"
+ }
+
+ sampling = [
+ {
+ type = "head_sampling"
+ rate = 0.1
+ environment = "production"
+ },
+ {
+ type = "head_sampling"
+ rate = 0.5
+ environment = "preview"
+ request_path = "/api/"
+ }
+ ]
+
+ transforms = [
+ {
+ id = "transform-filter-pii"
+ },
+ {
+ id = "transform-enrich-context"
+ }
+ ]
+}
+
+# OTLP HTTP drain for traces
+resource "vercel_drain" "otlp_traces" {
+ name = "jaeger-traces"
+ projects = "all"
+
+ schemas = {
+ trace = {
+ version = "v1"
+ }
+ }
+
+ delivery = {
+ type = "otlphttp"
+ endpoint = {
+ traces = "https://jaeger.example.com/api/traces"
+ }
+ encoding = "proto"
+ headers = {
+ "Authorization" = "Bearer jaeger-token"
+ }
+ }
+
+ sampling = [
+ {
+ type = "head_sampling"
+ rate = 0.01 # 1% sampling for traces
+ }
+ ]
+}
+```
+
+
+## Schema
+
+### Required
+
+- `delivery` (Attributes) Configuration for how data should be delivered. (see [below for nested schema](#nestedatt--delivery))
+- `name` (String) The name of the Drain.
+- `projects` (String) Whether to include all projects or a specific set. Valid values are `all` or `some`.
+- `schemas` (Map of Object) A map of schema configurations. Keys can be `log`, `trace`, `analytics`, or `speed_insights`. (see [below for nested schema](#nestedatt--schemas))
+
+### Optional
+
+- `filter` (String) A filter expression to apply to incoming data.
+- `project_ids` (Set of String) A list of project IDs that the drain should be associated with. Required when `projects` is `some`.
+- `sampling` (Attributes Set) Sampling configuration for the drain. (see [below for nested schema](#nestedatt--sampling))
+- `team_id` (String) The ID of the team the Drain should exist under. Required when configuring a team resource if a default team has not been set in the provider.
+- `transforms` (Attributes Set) Transform configurations for the drain. (see [below for nested schema](#nestedatt--transforms))
+
+### Read-Only
+
+- `id` (String) The ID of the Drain.
+- `status` (String) The status of the drain.
+
+
+### Nested Schema for `delivery`
+
+Required:
+
+- `type` (String) The delivery type. Valid values are `http` or `otlphttp`.
+
+Optional:
+
+- `compression` (String) The compression method. Valid values are `gzip` or `none`.
+- `encoding` (String) The encoding format. Valid values are `json`, `ndjson`, or `proto` (for OTLP).
+- `endpoint` (Attributes) Endpoint configuration. Use `url` for HTTP or `traces` for OTLP. (see [below for nested schema](#nestedatt--delivery--endpoint))
+- `headers` (Map of String) Custom headers to include in HTTP requests.
+- `secret` (String, Sensitive) A secret for signing requests.
+
+
+### Nested Schema for `delivery.endpoint`
+
+Optional:
+
+- `traces` (String) The traces endpoint URL for OTLP delivery type.
+- `url` (String) The endpoint URL for HTTP delivery type.
+
+
+
+
+### Nested Schema for `schemas`
+
+Required:
+
+- `version` (String)
+
+
+
+### Nested Schema for `sampling`
+
+Required:
+
+- `rate` (Number) The sampling rate from 0 to 1 (e.g., 0.1 for 10%).
+- `type` (String) The sampling type. Only `head_sampling` is supported.
+
+Optional:
+
+- `environment` (String) The environment to apply sampling to. Valid values are `production` or `preview`.
+- `request_path` (String) Request path prefix to apply the sampling rule to.
+
+
+
+### Nested Schema for `transforms`
+
+Required:
+
+- `id` (String) The transform ID.
+
+## Import
+
+Import is supported using the following syntax:
+
+```shell
+# If importing into a personal account, or with a team configured on
+# the provider, simply use the drain ID.
+terraform import vercel_drain.example drn_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+
+# Alternatively, you can import via the team_id and drain_id.
+terraform import vercel_drain.example team_xxxxxxxxxxxxxxxxxxxxxxxx/drn_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+```
diff --git a/examples/data-sources/vercel_drain/data-source.tf b/examples/data-sources/vercel_drain/data-source.tf
new file mode 100644
index 00000000..8bfd1bb0
--- /dev/null
+++ b/examples/data-sources/vercel_drain/data-source.tf
@@ -0,0 +1,3 @@
+data "vercel_drain" "example" {
+ id = "drn_xxxxxxxxxxxxxxxxxxxxxxxx"
+}
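+
+# The drain's attributes can then be referenced elsewhere in the
+# configuration, e.g. a hypothetical output exposing the drain's status.
+output "drain_status" {
+ value = data.vercel_drain.example.status
+}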
diff --git a/examples/resources/vercel_drain/import.sh b/examples/resources/vercel_drain/import.sh
new file mode 100644
index 00000000..ec01a72d
--- /dev/null
+++ b/examples/resources/vercel_drain/import.sh
@@ -0,0 +1,6 @@
+# If importing into a personal account, or with a team configured on
+# the provider, simply use the drain ID.
+terraform import vercel_drain.example drn_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+
+# Alternatively, you can import via the team_id and drain_id.
+terraform import vercel_drain.example team_xxxxxxxxxxxxxxxxxxxxxxxx/drn_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
diff --git a/examples/resources/vercel_drain/resource.tf b/examples/resources/vercel_drain/resource.tf
new file mode 100644
index 00000000..d28c2892
--- /dev/null
+++ b/examples/resources/vercel_drain/resource.tf
@@ -0,0 +1,117 @@
+resource "vercel_project" "example" {
+ name = "example-project"
+}
+
+# Basic HTTP drain for logs
+resource "vercel_drain" "basic_http" {
+ name = "basic-http-logs"
+ projects = "all"
+
+ schemas = {
+ log = {
+ version = "v1"
+ }
+ }
+
+ delivery = {
+ type = "http"
+ endpoint = {
+ url = "https://example.com/webhook"
+ }
+ encoding = "json"
+ headers = {
+ "Authorization" = "Bearer your-token"
+ }
+ }
+}
+
+# Advanced drain with multiple schemas and sampling
+resource "vercel_drain" "advanced" {
+ name = "advanced-multi-schema"
+ projects = "some"
+ project_ids = [vercel_project.example.id]
+ filter = "level >= 'info'"
+
+ schemas = {
+ log = {
+ version = "v1"
+ }
+ trace = {
+ version = "v1"
+ }
+ analytics = {
+ version = "v1"
+ }
+ speed_insights = {
+ version = "v1"
+ }
+ }
+
+ delivery = {
+ type = "http"
+ endpoint = {
+ url = "https://example.com/advanced-drain"
+ }
+ encoding = "ndjson"
+ compression = "gzip"
+ headers = {
+ "Authorization" = "Bearer advanced-token"
+ "Content-Type" = "application/x-ndjson"
+ "X-Custom" = "custom-header"
+ }
+ secret = "your-signing-secret-for-verification"
+ }
+
+ sampling = [
+ {
+ type = "head_sampling"
+ rate = 0.1
+ environment = "production"
+ },
+ {
+ type = "head_sampling"
+ rate = 0.5
+ environment = "preview"
+ request_path = "/api/"
+ }
+ ]
+
+ transforms = [
+ {
+ id = "transform-filter-pii"
+ },
+ {
+ id = "transform-enrich-context"
+ }
+ ]
+}
+
+# OTLP HTTP drain for traces
+resource "vercel_drain" "otlp_traces" {
+ name = "jaeger-traces"
+ projects = "all"
+
+ schemas = {
+ trace = {
+ version = "v1"
+ }
+ }
+
+ delivery = {
+ type = "otlphttp"
+ endpoint = {
+ traces = "https://jaeger.example.com/api/traces"
+ }
+ encoding = "proto"
+ headers = {
+ "Authorization" = "Bearer jaeger-token"
+ }
+ }
+
+ sampling = [
+ {
+ type = "head_sampling"
+ rate = 0.01 # 1% sampling for traces
+ }
+ ]
+}
diff --git a/vercel/data_source_drain.go b/vercel/data_source_drain.go
new file mode 100644
index 00000000..5d7fcdba
--- /dev/null
+++ b/vercel/data_source_drain.go
@@ -0,0 +1,387 @@
+package vercel
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/vercel/terraform-provider-vercel/v3/client"
+)
+
+var (
+ _ datasource.DataSource = &drainDataSource{}
+ _ datasource.DataSourceWithConfigure = &drainDataSource{}
+)
+
+func newDrainDataSource() datasource.DataSource {
+ return &drainDataSource{}
+}
+
+type drainDataSource struct {
+ client *client.Client
+}
+
+func (d *drainDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_drain"
+}
+
+func (d *drainDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+ if req.ProviderData == nil {
+ return
+ }
+
+ client, ok := req.ProviderData.(*client.Client)
+ if !ok {
+ resp.Diagnostics.AddError(
+ "Unexpected Data Source Configure Type",
+ fmt.Sprintf("Expected *client.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ )
+ return
+ }
+
+ d.client = client
+}
+
+func (d *drainDataSource) Schema(_ context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ Description: `
+Provides information about an existing Drain.
+
+Drains collect various types of data including logs, traces, analytics, and speed insights from your Vercel projects.
+This is a more generic version of log drains that supports multiple data types and delivery methods.
+
+Teams on Pro and Enterprise plans can create configurable drains from the Vercel dashboard.
+`,
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "The ID of the Drain.",
+ Required: true,
+ },
+ "team_id": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ Description: "The ID of the team the Drain should exist under. Required when configuring a team resource if a default team has not been set in the provider.",
+ },
+ "name": schema.StringAttribute{
+ Description: "The name of the Drain.",
+ Computed: true,
+ },
+ "projects": schema.StringAttribute{
+ Description: "Whether to include all projects or a specific set. Valid values are `all` or `some`.",
+ Computed: true,
+ },
+ "project_ids": schema.SetAttribute{
+ Description: "A list of project IDs that the drain should be associated with. Only valid when `projects` is set to `some`.",
+ Computed: true,
+ ElementType: types.StringType,
+ },
+ "filter": schema.StringAttribute{
+ Description: "A filter expression applied to incoming data.",
+ Computed: true,
+ },
+ "schemas": schema.MapAttribute{
+ Description: "A map of schema configurations. Keys can be `log`, `trace`, `analytics`, or `speed_insights`.",
+ Computed: true,
+ ElementType: types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "version": types.StringType,
+ },
+ },
+ },
+ "delivery": schema.SingleNestedAttribute{
+ Description: "Configuration for how data should be delivered.",
+ Computed: true,
+ Attributes: map[string]schema.Attribute{
+ "type": schema.StringAttribute{
+ Description: "The delivery type. Valid values are `http` or `otlphttp`.",
+ Computed: true,
+ },
+ "endpoint": schema.SingleNestedAttribute{
+ Description: "Endpoint configuration. Contains `url` for HTTP or `traces` for OTLP.",
+ Computed: true,
+ Attributes: map[string]schema.Attribute{
+ "url": schema.StringAttribute{
+ Description: "The endpoint URL for HTTP delivery type.",
+ Computed: true,
+ },
+ "traces": schema.StringAttribute{
+ Description: "The traces endpoint URL for OTLP delivery type.",
+ Computed: true,
+ },
+ },
+ },
+ "encoding": schema.StringAttribute{
+ Description: "The encoding format. Valid values are `json`, `ndjson` (for HTTP), or `proto` (for OTLP).",
+ Computed: true,
+ },
+ "compression": schema.StringAttribute{
+ Description: "The compression method. Valid values are `gzip` or `none`. Only applicable for HTTP delivery.",
+ Computed: true,
+ },
+ "headers": schema.MapAttribute{
+ Description: "Custom headers to include in HTTP requests.",
+ Computed: true,
+ ElementType: types.StringType,
+ },
+ },
+ },
+ "sampling": schema.SetNestedAttribute{
+ Description: "Sampling configuration for the drain.",
+ Computed: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "type": schema.StringAttribute{
+ Description: "The sampling type. Only `head_sampling` is supported.",
+ Computed: true,
+ },
+ "rate": schema.Float64Attribute{
+ Description: "The sampling rate from 0 to 1 (e.g., 0.1 for 10%).",
+ Computed: true,
+ },
+ "environment": schema.StringAttribute{
+ Description: "The environment to apply sampling to. Valid values are `production` or `preview`.",
+ Computed: true,
+ },
+ "request_path": schema.StringAttribute{
+ Description: "Request path prefix to apply the sampling rule to.",
+ Computed: true,
+ },
+ },
+ },
+ },
+ "transforms": schema.SetNestedAttribute{
+ Description: "Transform configurations for the drain.",
+ Computed: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "The transform ID.",
+ Computed: true,
+ },
+ },
+ },
+ },
+ "status": schema.StringAttribute{
+ Description: "The status of the drain.",
+ Computed: true,
+ },
+ },
+ }
+}
+
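+// DrainDataSource reflects the state terraform stores internally for a drain data source.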
+type DrainDataSource struct {
+ ID types.String `tfsdk:"id"`
+ TeamID types.String `tfsdk:"team_id"`
+ Name types.String `tfsdk:"name"`
+ Projects types.String `tfsdk:"projects"`
+ ProjectIds types.Set `tfsdk:"project_ids"`
+ Filter types.String `tfsdk:"filter"`
+ Schemas types.Map `tfsdk:"schemas"`
+ Delivery types.Object `tfsdk:"delivery"`
+ Sampling types.Set `tfsdk:"sampling"`
+ Transforms types.Set `tfsdk:"transforms"`
+ Status types.String `tfsdk:"status"`
+}
+
+type DeliveryDataSourceModel struct {
+ Type types.String `tfsdk:"type"`
+ Endpoint types.Object `tfsdk:"endpoint"`
+ Encoding types.String `tfsdk:"encoding"`
+ Compression types.String `tfsdk:"compression"`
+ Headers types.Map `tfsdk:"headers"`
+}
+
+type EndpointDataSourceModel struct {
+ URL types.String `tfsdk:"url"`
+ Traces types.String `tfsdk:"traces"`
+}
+
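+// responseToDrainDataSource converts a client.Drain response into the data source model.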
+func responseToDrainDataSource(ctx context.Context, out client.Drain) (DrainDataSource, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ projectIds, d := types.SetValueFrom(ctx, types.StringType, out.ProjectIds)
+ diags.Append(d...)
+ if diags.HasError() {
+ return DrainDataSource{}, diags
+ }
+
+ schemasMap := make(map[string]SchemaVersionModel)
+ for k, v := range out.Schemas {
+ if schemaMap, ok := v.(map[string]any); ok {
+ if version, exists := schemaMap["version"]; exists {
+ if versionStr, ok := version.(string); ok {
+ schemasMap[k] = SchemaVersionModel{
+ Version: types.StringValue(versionStr),
+ }
+ }
+ }
+ }
+ }
+
+ schemas, d := types.MapValueFrom(ctx, types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "version": types.StringType,
+ },
+ }, schemasMap)
+ diags.Append(d...)
+ if diags.HasError() {
+ return DrainDataSource{}, diags
+ }
+
+ deliveryHeaders, d := types.MapValueFrom(ctx, types.StringType, out.Delivery.Headers)
+ diags.Append(d...)
+ if diags.HasError() {
+ return DrainDataSource{}, diags
+ }
+
+ deliveryModel := DeliveryDataSourceModel{
+ Type: types.StringValue(out.Delivery.Type),
+ Encoding: types.StringValue(out.Delivery.Encoding),
+ Compression: types.StringPointerValue(out.Delivery.Compression),
+ Headers: deliveryHeaders,
+ }
+
+ var endpointModel EndpointDataSourceModel
+ if endpoint, ok := out.Delivery.Endpoint.(string); ok {
+ endpointModel = EndpointDataSourceModel{
+ URL: types.StringValue(endpoint),
+ Traces: types.StringNull(),
+ }
+ } else if otlpEndpoint, ok := out.Delivery.Endpoint.(map[string]any); ok {
+ if traces, exists := otlpEndpoint["traces"]; exists {
+ if tracesStr, ok := traces.(string); ok {
+ endpointModel = EndpointDataSourceModel{
+ URL: types.StringNull(),
+ Traces: types.StringValue(tracesStr),
+ }
+ }
+ }
+ }
+
+ endpoint, d := types.ObjectValueFrom(ctx, map[string]attr.Type{
+ "url": types.StringType,
+ "traces": types.StringType,
+ }, endpointModel)
+ diags.Append(d...)
+ if diags.HasError() {
+ return DrainDataSource{}, diags
+ }
+
+ deliveryModel.Endpoint = endpoint
+
+ delivery, d := types.ObjectValueFrom(ctx, map[string]attr.Type{
+ "type": types.StringType,
+ "endpoint": types.ObjectType{AttrTypes: map[string]attr.Type{"url": types.StringType, "traces": types.StringType}},
+ "encoding": types.StringType,
+ "compression": types.StringType,
+ "headers": types.MapType{ElemType: types.StringType},
+ }, deliveryModel)
+ diags.Append(d...)
+ if diags.HasError() {
+ return DrainDataSource{}, diags
+ }
+
+ samplingModels := make([]SamplingModel, len(out.Sampling))
+ for i, s := range out.Sampling {
+ samplingModels[i] = SamplingModel{
+ Type: types.StringValue(s.Type),
+ Rate: types.Float64Value(s.Rate),
+ Environment: types.StringPointerValue(s.Env),
+ RequestPath: types.StringPointerValue(s.RequestPath),
+ }
+ }
+
+ sampling, d := types.SetValueFrom(ctx, types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "type": types.StringType,
+ "rate": types.Float64Type,
+ "environment": types.StringType,
+ "request_path": types.StringType,
+ },
+ }, samplingModels)
+ diags.Append(d...)
+ if diags.HasError() {
+ return DrainDataSource{}, diags
+ }
+
+ transformModels := make([]TransformModel, len(out.Transforms))
+ for i, t := range out.Transforms {
+ transformModels[i] = TransformModel{
+ ID: types.StringValue(t.ID),
+ }
+ }
+
+ transforms, d := types.SetValueFrom(ctx, types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "id": types.StringType,
+ },
+ }, transformModels)
+ diags.Append(d...)
+ if diags.HasError() {
+ return DrainDataSource{}, diags
+ }
+
+ return DrainDataSource{
+ ID: types.StringValue(out.ID),
+ TeamID: toTeamID(out.TeamID),
+ Name: types.StringValue(out.Name),
+ Projects: types.StringValue(out.Projects),
+ ProjectIds: projectIds,
+ Filter: types.StringPointerValue(out.Filter),
+ Schemas: schemas,
+ Delivery: delivery,
+ Sampling: sampling,
+ Transforms: transforms,
+ Status: types.StringValue(out.Status),
+ }, diags
+}
+
+// Read will read the drain information by requesting it from the Vercel API, and will update terraform
+// with this information.
+// It is called by the provider whenever data source values should be read to update state.
+func (d *drainDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+ var config DrainDataSource
+ diags := req.Config.Get(ctx, &config)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ out, err := d.client.GetDrain(ctx, config.ID.ValueString(), config.TeamID.ValueString())
+ if client.NotFound(err) {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error reading Drain",
+ fmt.Sprintf("Could not get Drain %s %s, unexpected error: %s",
+ config.TeamID.ValueString(),
+ config.ID.ValueString(),
+ err,
+ ),
+ )
+ return
+ }
+
+ result, diags := responseToDrainDataSource(ctx, out)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "read drain", map[string]any{
+ "team_id": result.TeamID.ValueString(),
+ "drain_id": result.ID.ValueString(),
+ })
+
+ diags = resp.State.Set(ctx, result)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+}
diff --git a/vercel/provider.go b/vercel/provider.go
index b8d6e29c..89575277 100644
--- a/vercel/provider.go
+++ b/vercel/provider.go
@@ -59,6 +59,7 @@ func (p *vercelProvider) Resources(_ context.Context) []func() resource.Resource
newCustomEnvironmentResource,
newDeploymentResource,
newDNSRecordResource,
+ newDrainResource,
newEdgeConfigItemResource,
newEdgeConfigResource,
newEdgeConfigSchemaResource,
@@ -94,6 +95,7 @@ func (p *vercelProvider) DataSources(_ context.Context) []func() datasource.Data
newCustomEnvironmentDataSource,
newDeploymentDataSource,
newDomainConfigDataSource,
+ newDrainDataSource,
newEdgeConfigDataSource,
newEdgeConfigItemDataSource,
newEdgeConfigSchemaDataSource,
diff --git a/vercel/resource_drain.go b/vercel/resource_drain.go
new file mode 100644
index 00000000..49793121
--- /dev/null
+++ b/vercel/resource_drain.go
@@ -0,0 +1,865 @@
+package vercel
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework-validators/float64validator"
+ "github.com/hashicorp/terraform-plugin-framework-validators/mapvalidator"
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/vercel/terraform-provider-vercel/v3/client"
+)
+
+var (
+ _ resource.Resource = &drainResource{}
+ _ resource.ResourceWithConfigure = &drainResource{}
+ _ resource.ResourceWithImportState = &drainResource{}
+)
+
+func newDrainResource() resource.Resource {
+ return &drainResource{}
+}
+
+type drainResource struct {
+ client *client.Client
+}
+
+func (r *drainResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_drain"
+}
+
+func (r *drainResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+ if req.ProviderData == nil {
+ return
+ }
+
+ client, ok := req.ProviderData.(*client.Client)
+ if !ok {
+ resp.Diagnostics.AddError(
+ "Unexpected Resource Configure Type",
+ fmt.Sprintf("Expected *client.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ )
+ return
+ }
+
+ r.client = client
+}
+
+func (r *drainResource) Schema(_ context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ Description: `
+Provides a Configurable Drain resource.
+
+Drains collect various types of data including logs, traces, analytics, and speed insights from your Vercel projects.
+This is a more generic version of log drains that supports multiple data types and delivery methods.
+
+Teams on Pro and Enterprise plans can create configurable drains from the Vercel dashboard.
+
+~> Only Pro and Enterprise teams can create Configurable Drains.`,
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "The ID of the Drain.",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()},
+ },
+ "team_id": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ Description: "The ID of the team the Drain should exist under. Required when configuring a team resource if a default team has not been set in the provider.",
+ PlanModifiers: []planmodifier.String{stringplanmodifier.RequiresReplaceIfConfigured(), stringplanmodifier.UseStateForUnknown()},
+ },
+ "name": schema.StringAttribute{
+ Description: "The name of the Drain.",
+ Required: true,
+ },
+ "projects": schema.StringAttribute{
+ Description: "Whether to include all projects or a specific set. Valid values are `all` or `some`.",
+ Required: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("all", "some"),
+ },
+ },
+ "project_ids": schema.SetAttribute{
+ Description: "A list of project IDs that the drain should be associated with. Required when `projects` is `some`.",
+ Optional: true,
+ ElementType: types.StringType,
+ },
+ "filter": schema.StringAttribute{
+ Description: "A filter expression to apply to incoming data.",
+ Optional: true,
+ },
+ "schemas": schema.MapAttribute{
+ Description: "A map of schema configurations. Keys can be `log`, `trace`, `analytics`, or `speed_insights`.",
+ Required: true,
+ ElementType: types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "version": types.StringType,
+ },
+ },
+ },
+ "delivery": schema.SingleNestedAttribute{
+ Description: "Configuration for how data should be delivered.",
+ Required: true,
+ Attributes: map[string]schema.Attribute{
+ "type": schema.StringAttribute{
+ Description: "The delivery type. Valid values are `http` or `otlphttp`.",
+ Required: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("http", "otlphttp"),
+ },
+ },
+ "endpoint": schema.SingleNestedAttribute{
+ Description: "Endpoint configuration. Use `url` for HTTP or `traces` for OTLP.",
+ Optional: true,
+ Attributes: map[string]schema.Attribute{
+ "url": schema.StringAttribute{
+ Description: "The endpoint URL for HTTP delivery type.",
+ Optional: true,
+ },
+ "traces": schema.StringAttribute{
+ Description: "The traces endpoint URL for OTLP delivery type.",
+ Optional: true,
+ },
+ },
+ },
+ "encoding": schema.StringAttribute{
+ Description: "The encoding format. Valid values are `json`, `ndjson`, or `proto` (for OTLP).",
+ Optional: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("json", "ndjson", "proto"),
+ },
+ },
+ "compression": schema.StringAttribute{
+ Description: "The compression method. Valid values are `gzip` or `none`.",
+ Optional: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("gzip", "none"),
+ },
+ },
+ "headers": schema.MapAttribute{
+ Description: "Custom headers to include in HTTP requests.",
+ Optional: true,
+ ElementType: types.StringType,
+ Validators: []validator.Map{
+ mapvalidator.SizeAtMost(10),
+ },
+ },
+ "secret": schema.StringAttribute{
+ Description: "A secret for signing requests.",
+ Optional: true,
+ Sensitive: true,
+ },
+ },
+ },
+ "sampling": schema.SetNestedAttribute{
+ Description: "Sampling configuration for the drain.",
+ Optional: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "type": schema.StringAttribute{
+ Description: "The sampling type. Only `head_sampling` is supported.",
+ Required: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("head_sampling"),
+ },
+ },
+ "rate": schema.Float64Attribute{
+ Description: "The sampling rate from 0 to 1 (e.g., 0.1 for 10%).",
+ Required: true,
+ Validators: []validator.Float64{
+ float64validator.AtLeast(0),
+ float64validator.AtMost(1),
+ },
+ },
+ "environment": schema.StringAttribute{
+ Description: "The environment to apply sampling to. Valid values are `production` or `preview`.",
+ Optional: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("production", "preview"),
+ },
+ },
+ "request_path": schema.StringAttribute{
+ Description: "Request path prefix to apply the sampling rule to.",
+ Optional: true,
+ },
+ },
+ },
+ },
+ "transforms": schema.SetNestedAttribute{
+ Description: "Transform configurations for the drain.",
+ Optional: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "The transform ID.",
+ Required: true,
+ },
+ },
+ },
+ },
+ "status": schema.StringAttribute{
+ Description: "The status of the drain.",
+ Computed: true,
+ },
+ },
+ }
+}
+
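+// Drain reflects the state terraform stores internally for a drain.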
+type Drain struct {
+ ID types.String `tfsdk:"id"`
+ TeamID types.String `tfsdk:"team_id"`
+ Name types.String `tfsdk:"name"`
+ Projects types.String `tfsdk:"projects"`
+ ProjectIds types.Set `tfsdk:"project_ids"`
+ Filter types.String `tfsdk:"filter"`
+ Schemas types.Map `tfsdk:"schemas"`
+ Delivery types.Object `tfsdk:"delivery"`
+ Sampling types.Set `tfsdk:"sampling"`
+ Transforms types.Set `tfsdk:"transforms"`
+ Status types.String `tfsdk:"status"`
+}
+
+type DeliveryModel struct {
+ Type types.String `tfsdk:"type"`
+ Endpoint types.Object `tfsdk:"endpoint"`
+ Encoding types.String `tfsdk:"encoding"`
+ Compression types.String `tfsdk:"compression"`
+ Headers types.Map `tfsdk:"headers"`
+ Secret types.String `tfsdk:"secret"`
+}
+
+type EndpointModel struct {
+ URL types.String `tfsdk:"url"`
+ Traces types.String `tfsdk:"traces"`
+}
+
+type SamplingModel struct {
+ Type types.String `tfsdk:"type"`
+ Rate types.Float64 `tfsdk:"rate"`
+ Environment types.String `tfsdk:"environment"`
+ RequestPath types.String `tfsdk:"request_path"`
+}
+
+type TransformModel struct {
+ ID types.String `tfsdk:"id"`
+}
+
+type SchemaVersionModel struct {
+ Version types.String `tfsdk:"version"`
+}
+
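+// responseToDrain converts a client.Drain response into the terraform model.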
+func responseToDrain(ctx context.Context, out client.Drain) (Drain, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ projectIds, d := types.SetValueFrom(ctx, types.StringType, out.ProjectIds)
+ diags.Append(d...)
+ if diags.HasError() {
+ return Drain{}, diags
+ }
+
+ schemasMap := make(map[string]SchemaVersionModel)
+ for k, v := range out.Schemas {
+ if schemaMap, ok := v.(map[string]any); ok {
+ if version, exists := schemaMap["version"]; exists {
+ if versionStr, ok := version.(string); ok {
+ schemasMap[k] = SchemaVersionModel{
+ Version: types.StringValue(versionStr),
+ }
+ }
+ }
+ }
+ }
+
+ schemas, d := types.MapValueFrom(ctx, types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "version": types.StringType,
+ },
+ }, schemasMap)
+ diags.Append(d...)
+ if diags.HasError() {
+ return Drain{}, diags
+ }
+
+ deliveryHeaders, d := types.MapValueFrom(ctx, types.StringType, out.Delivery.Headers)
+ diags.Append(d...)
+ if diags.HasError() {
+ return Drain{}, diags
+ }
+
+ deliveryModel := DeliveryModel{
+ Type: types.StringValue(out.Delivery.Type),
+ Encoding: types.StringValue(out.Delivery.Encoding),
+ Compression: types.StringPointerValue(out.Delivery.Compression),
+ Headers: deliveryHeaders,
+ Secret: types.StringPointerValue(out.Delivery.Secret),
+ }
+
+ var endpointModel EndpointModel
+ if endpoint, ok := out.Delivery.Endpoint.(string); ok {
+ endpointModel = EndpointModel{
+ URL: types.StringValue(endpoint),
+ Traces: types.StringNull(),
+ }
+ } else if otlpEndpoint, ok := out.Delivery.Endpoint.(map[string]any); ok {
+ if traces, exists := otlpEndpoint["traces"]; exists {
+ if tracesStr, ok := traces.(string); ok {
+ endpointModel = EndpointModel{
+ URL: types.StringNull(),
+ Traces: types.StringValue(tracesStr),
+ }
+ }
+ }
+ }
+
+ endpoint, d := types.ObjectValueFrom(ctx, map[string]attr.Type{
+ "url": types.StringType,
+ "traces": types.StringType,
+ }, endpointModel)
+ diags.Append(d...)
+ if diags.HasError() {
+ return Drain{}, diags
+ }
+ deliveryModel.Endpoint = endpoint
+
+ delivery, d := types.ObjectValueFrom(ctx, map[string]attr.Type{
+ "type": types.StringType,
+ "endpoint": types.ObjectType{AttrTypes: map[string]attr.Type{"url": types.StringType, "traces": types.StringType}},
+ "encoding": types.StringType,
+ "compression": types.StringType,
+ "headers": types.MapType{ElemType: types.StringType},
+ "secret": types.StringType,
+ }, deliveryModel)
+ diags.Append(d...)
+ if diags.HasError() {
+ return Drain{}, diags
+ }
+
+ samplingModels := make([]SamplingModel, len(out.Sampling))
+ for i, s := range out.Sampling {
+ samplingModels[i] = SamplingModel{
+ Type: types.StringValue(s.Type),
+ Rate: types.Float64Value(s.Rate),
+ Environment: types.StringPointerValue(s.Env),
+ RequestPath: types.StringPointerValue(s.RequestPath),
+ }
+ }
+
+ sampling, d := types.SetValueFrom(ctx, types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "type": types.StringType,
+ "rate": types.Float64Type,
+ "environment": types.StringType,
+ "request_path": types.StringType,
+ },
+ }, samplingModels)
+ diags.Append(d...)
+ if diags.HasError() {
+ return Drain{}, diags
+ }
+
+ transformModels := make([]TransformModel, len(out.Transforms))
+ for i, t := range out.Transforms {
+ transformModels[i] = TransformModel{
+ ID: types.StringValue(t.ID),
+ }
+ }
+
+ transforms, d := types.SetValueFrom(ctx, types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "id": types.StringType,
+ },
+ }, transformModels)
+ diags.Append(d...)
+ if diags.HasError() {
+ return Drain{}, diags
+ }
+
+ return Drain{
+ ID: types.StringValue(out.ID),
+ TeamID: toTeamID(out.TeamID),
+ Name: types.StringValue(out.Name),
+ Projects: types.StringValue(out.Projects),
+ ProjectIds: projectIds,
+ Filter: types.StringPointerValue(out.Filter),
+ Schemas: schemas,
+ Delivery: delivery,
+ Sampling: sampling,
+ Transforms: transforms,
+ Status: types.StringValue(out.Status),
+ }, diags
+}
+
+func (r *drainResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+ var plan Drain
+ diags := req.Plan.Get(ctx, &plan)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ createRequest, d := planToCreateRequest(ctx, plan)
+ resp.Diagnostics.Append(d...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ out, err := r.client.CreateDrain(ctx, createRequest)
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error creating Drain",
+ "Could not create Drain, unexpected error: "+err.Error(),
+ )
+ return
+ }
+
+ result, diags := responseToDrain(ctx, out)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ tflog.Info(ctx, "created Drain", map[string]any{
+ "team_id": plan.TeamID.ValueString(),
+ "drain_id": result.ID.ValueString(),
+ })
+
+ diags = resp.State.Set(ctx, result)
+ resp.Diagnostics.Append(diags...)
+}
+
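+// planToCreateRequest converts planned terraform state into a client.CreateDrainRequest.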
+func planToCreateRequest(ctx context.Context, plan Drain) (client.CreateDrainRequest, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ var projectIds []string
+ if !plan.ProjectIds.IsNull() && !plan.ProjectIds.IsUnknown() {
+ d := plan.ProjectIds.ElementsAs(ctx, &projectIds, false)
+ diags.Append(d...)
+ if diags.HasError() {
+ return client.CreateDrainRequest{}, diags
+ }
+ }
+
+ schemas := make(map[string]client.SchemaConfig)
+ var schemasMap map[string]SchemaVersionModel
+ d := plan.Schemas.ElementsAs(ctx, &schemasMap, false)
+ diags.Append(d...)
+ if diags.HasError() {
+ return client.CreateDrainRequest{}, diags
+ }
+
+ for k, v := range schemasMap {
+ schemas[k] = client.SchemaConfig{
+ Version: v.Version.ValueString(),
+ }
+ }
+
+ var deliveryModel DeliveryModel
+ d = plan.Delivery.As(ctx, &deliveryModel, basetypes.ObjectAsOptions{})
+ diags.Append(d...)
+ if diags.HasError() {
+ return client.CreateDrainRequest{}, diags
+ }
+
+ var headers map[string]string
+ if !deliveryModel.Headers.IsNull() && !deliveryModel.Headers.IsUnknown() {
+ d = deliveryModel.Headers.ElementsAs(ctx, &headers, false)
+ diags.Append(d...)
+ if diags.HasError() {
+ return client.CreateDrainRequest{}, diags
+ }
+ }
+
+ delivery := client.DeliveryConfig{
+ Type: deliveryModel.Type.ValueString(),
+ Encoding: deliveryModel.Encoding.ValueString(),
+ Headers: headers,
+ }
+
+ if !deliveryModel.Endpoint.IsNull() && !deliveryModel.Endpoint.IsUnknown() {
+ var endpointModel EndpointModel
+ d = deliveryModel.Endpoint.As(ctx, &endpointModel, basetypes.ObjectAsOptions{})
+ diags.Append(d...)
+ if !diags.HasError() {
+ if !endpointModel.Traces.IsNull() && !endpointModel.Traces.IsUnknown() {
+ delivery.Endpoint = map[string]string{
+ "traces": endpointModel.Traces.ValueString(),
+ }
+ } else if !endpointModel.URL.IsNull() && !endpointModel.URL.IsUnknown() {
+ delivery.Endpoint = endpointModel.URL.ValueString()
+ }
+ }
+ }
+ if !deliveryModel.Compression.IsNull() && !deliveryModel.Compression.IsUnknown() {
+ compression := deliveryModel.Compression.ValueString()
+ delivery.Compression = &compression
+ }
+ if !deliveryModel.Secret.IsNull() && !deliveryModel.Secret.IsUnknown() {
+ secret := deliveryModel.Secret.ValueString()
+ delivery.Secret = &secret
+ }
+
+ var sampling []client.SamplingConfig
+ if !plan.Sampling.IsNull() && !plan.Sampling.IsUnknown() {
+ var samplingModels []SamplingModel
+ d = plan.Sampling.ElementsAs(ctx, &samplingModels, false)
+ diags.Append(d...)
+ if diags.HasError() {
+ return client.CreateDrainRequest{}, diags
+ }
+
+ for _, s := range samplingModels {
+ samplingConfig := client.SamplingConfig{
+ Type: s.Type.ValueString(),
+ Rate: s.Rate.ValueFloat64(),
+ }
+ if !s.Environment.IsNull() && !s.Environment.IsUnknown() {
+ env := s.Environment.ValueString()
+ samplingConfig.Env = &env
+ }
+ if !s.RequestPath.IsNull() && !s.RequestPath.IsUnknown() {
+ path := s.RequestPath.ValueString()
+ samplingConfig.RequestPath = &path
+ }
+ sampling = append(sampling, samplingConfig)
+ }
+ }
+
+ var transforms []client.TransformConfig
+ if !plan.Transforms.IsNull() && !plan.Transforms.IsUnknown() {
+ var transformModels []TransformModel
+ d = plan.Transforms.ElementsAs(ctx, &transformModels, false)
+ diags.Append(d...)
+ if diags.HasError() {
+ return client.CreateDrainRequest{}, diags
+ }
+
+ for _, t := range transformModels {
+ transforms = append(transforms, client.TransformConfig{
+ ID: t.ID.ValueString(),
+ })
+ }
+ }
+
+ var filter *string
+ if !plan.Filter.IsNull() && !plan.Filter.IsUnknown() {
+ f := plan.Filter.ValueString()
+ filter = &f
+ }
+
+ return client.CreateDrainRequest{
+ TeamID: plan.TeamID.ValueString(),
+ Name: plan.Name.ValueString(),
+ Projects: plan.Projects.ValueString(),
+ ProjectIds: projectIds,
+ Filter: filter,
+ Schemas: schemas,
+ Delivery: delivery,
+ Sampling: sampling,
+ Transforms: transforms,
+ }, diags
+}
+
+func (r *drainResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+ var state Drain
+ diags := req.State.Get(ctx, &state)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ out, err := r.client.GetDrain(ctx, state.ID.ValueString(), state.TeamID.ValueString())
+ if client.NotFound(err) {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error reading Drain",
+ fmt.Sprintf("Could not get Drain %s %s, unexpected error: %s",
+ state.TeamID.ValueString(),
+ state.ID.ValueString(),
+ err,
+ ),
+ )
+ return
+ }
+
+ result, diags := responseToDrain(ctx, out)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ tflog.Info(ctx, "read drain", map[string]any{
+ "team_id": result.TeamID.ValueString(),
+ "drain_id": result.ID.ValueString(),
+ })
+
+ diags = resp.State.Set(ctx, result)
+ resp.Diagnostics.Append(diags...)
+}
+
+func (r *drainResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+ var plan, state Drain
+
+ diags := req.Plan.Get(ctx, &plan)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ diags = req.State.Get(ctx, &state)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ updateRequest, d := planToUpdateRequest(ctx, plan)
+ resp.Diagnostics.Append(d...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ out, err := r.client.UpdateDrain(ctx, state.ID.ValueString(), updateRequest)
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error updating Drain",
+ "Could not update Drain, unexpected error: "+err.Error(),
+ )
+ return
+ }
+
+ result, diags := responseToDrain(ctx, out)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ tflog.Info(ctx, "updated Drain", map[string]any{
+ "team_id": plan.TeamID.ValueString(),
+ "drain_id": result.ID.ValueString(),
+ })
+
+ diags = resp.State.Set(ctx, result)
+ resp.Diagnostics.Append(diags...)
+}
+
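+// planToUpdateRequest converts planned terraform state into a client.UpdateDrainRequest.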
+func planToUpdateRequest(ctx context.Context, plan Drain) (client.UpdateDrainRequest, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ var projectIds []string
+ if !plan.ProjectIds.IsNull() && !plan.ProjectIds.IsUnknown() {
+ d := plan.ProjectIds.ElementsAs(ctx, &projectIds, false)
+ diags.Append(d...)
+ if diags.HasError() {
+ return client.UpdateDrainRequest{}, diags
+ }
+ }
+
+ schemas := make(map[string]client.SchemaConfig)
+ var schemasMap map[string]SchemaVersionModel
+ d := plan.Schemas.ElementsAs(ctx, &schemasMap, false)
+ diags.Append(d...)
+ if diags.HasError() {
+ return client.UpdateDrainRequest{}, diags
+ }
+
+ for k, v := range schemasMap {
+ schemas[k] = client.SchemaConfig{
+ Version: v.Version.ValueString(),
+ }
+ }
+
+ var deliveryModel DeliveryModel
+ d = plan.Delivery.As(ctx, &deliveryModel, basetypes.ObjectAsOptions{})
+ diags.Append(d...)
+ if diags.HasError() {
+ return client.UpdateDrainRequest{}, diags
+ }
+
+ var headers map[string]string
+ if !deliveryModel.Headers.IsNull() && !deliveryModel.Headers.IsUnknown() {
+ d = deliveryModel.Headers.ElementsAs(ctx, &headers, false)
+ diags.Append(d...)
+ if diags.HasError() {
+ return client.UpdateDrainRequest{}, diags
+ }
+ }
+
+ delivery := &client.DeliveryConfig{
+ Type: deliveryModel.Type.ValueString(),
+ Encoding: deliveryModel.Encoding.ValueString(),
+ Headers: headers,
+ }
+
+ if !deliveryModel.Endpoint.IsNull() && !deliveryModel.Endpoint.IsUnknown() {
+ var endpointModel EndpointModel
+ d = deliveryModel.Endpoint.As(ctx, &endpointModel, basetypes.ObjectAsOptions{})
+ diags.Append(d...)
+ if !diags.HasError() {
+ if !endpointModel.Traces.IsNull() && !endpointModel.Traces.IsUnknown() {
+ delivery.Endpoint = map[string]string{
+ "traces": endpointModel.Traces.ValueString(),
+ }
+ } else if !endpointModel.URL.IsNull() && !endpointModel.URL.IsUnknown() {
+ delivery.Endpoint = endpointModel.URL.ValueString()
+ }
+ }
+ }
+
+ if !deliveryModel.Compression.IsNull() && !deliveryModel.Compression.IsUnknown() {
+ compression := deliveryModel.Compression.ValueString()
+ delivery.Compression = &compression
+ }
+
+ if !deliveryModel.Secret.IsNull() && !deliveryModel.Secret.IsUnknown() {
+ secret := deliveryModel.Secret.ValueString()
+ delivery.Secret = &secret
+ }
+
+ var sampling []client.SamplingConfig
+ if !plan.Sampling.IsNull() && !plan.Sampling.IsUnknown() {
+ var samplingModels []SamplingModel
+ d = plan.Sampling.ElementsAs(ctx, &samplingModels, false)
+ diags.Append(d...)
+ if diags.HasError() {
+ return client.UpdateDrainRequest{}, diags
+ }
+
+ for _, s := range samplingModels {
+ samplingConfig := client.SamplingConfig{
+ Type: s.Type.ValueString(),
+ Rate: s.Rate.ValueFloat64(),
+ }
+ if !s.Environment.IsNull() && !s.Environment.IsUnknown() {
+ env := s.Environment.ValueString()
+ samplingConfig.Env = &env
+ }
+ if !s.RequestPath.IsNull() && !s.RequestPath.IsUnknown() {
+ path := s.RequestPath.ValueString()
+ samplingConfig.RequestPath = &path
+ }
+ sampling = append(sampling, samplingConfig)
+ }
+ }
+
+ var transforms []client.TransformConfig
+ if !plan.Transforms.IsNull() && !plan.Transforms.IsUnknown() {
+ var transformModels []TransformModel
+ d = plan.Transforms.ElementsAs(ctx, &transformModels, false)
+ diags.Append(d...)
+ if diags.HasError() {
+ return client.UpdateDrainRequest{}, diags
+ }
+
+ for _, t := range transformModels {
+ transforms = append(transforms, client.TransformConfig{
+ ID: t.ID.ValueString(),
+ })
+ }
+ }
+
+ var filter *string
+ if !plan.Filter.IsNull() && !plan.Filter.IsUnknown() {
+ f := plan.Filter.ValueString()
+ filter = &f
+ }
+
+ name := plan.Name.ValueString()
+ projects := plan.Projects.ValueString()
+
+ return client.UpdateDrainRequest{
+ TeamID: plan.TeamID.ValueString(),
+ Name: &name,
+ Projects: &projects,
+ ProjectIds: projectIds,
+ Filter: filter,
+ Schemas: schemas,
+ Delivery: delivery,
+ Sampling: sampling,
+ Transforms: transforms,
+ }, diags
+}
+
+func (r *drainResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+ var state Drain
+ diags := req.State.Get(ctx, &state)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ err := r.client.DeleteDrain(ctx, state.ID.ValueString(), state.TeamID.ValueString())
+ if client.NotFound(err) {
+ return
+ }
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error deleting drain",
+ fmt.Sprintf(
+ "Could not delete Drain %s %s, unexpected error: %s",
+ state.TeamID.ValueString(),
+ state.ID.ValueString(),
+ err,
+ ),
+ )
+ return
+ }
+
+ tflog.Info(ctx, "deleted Drain", map[string]any{
+ "team_id": state.TeamID.ValueString(),
+ "drain_id": state.ID.ValueString(),
+ })
+}
+
+func (r *drainResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+ teamID, id, ok := splitInto1Or2(req.ID)
+ if !ok {
+ resp.Diagnostics.AddError(
+ "Error importing Drain",
+ fmt.Sprintf("Invalid id '%s' specified. Should be in format \"team_id/drain_id\" or \"drain_id\"", req.ID),
+ )
+ return
+ }
+
+ out, err := r.client.GetDrain(ctx, id, teamID)
+ if client.NotFound(err) {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error reading Drain",
+ fmt.Sprintf("Could not get Drain %s %s, unexpected error: %s",
+ teamID,
+ id,
+ err,
+ ),
+ )
+ return
+ }
+
+ result, diags := responseToDrain(ctx, out)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ tflog.Info(ctx, "import drain", map[string]any{
+ "team_id": result.TeamID.ValueString(),
+ "drain_id": result.ID.ValueString(),
+ })
+
+ diags = resp.State.Set(ctx, result)
+ resp.Diagnostics.Append(diags...)
+}
diff --git a/vercel/resource_drain_test.go b/vercel/resource_drain_test.go
new file mode 100644
index 00000000..dbad5b37
--- /dev/null
+++ b/vercel/resource_drain_test.go
@@ -0,0 +1,363 @@
+package vercel_test
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
+ "github.com/hashicorp/terraform-plugin-testing/helper/resource"
+ "github.com/hashicorp/terraform-plugin-testing/terraform"
+ "github.com/vercel/terraform-provider-vercel/v3/client"
+)
+
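+// testCheckDrainExists verifies that the drain behind resource name n can
+// still be fetched from the Vercel API.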
+func testCheckDrainExists(testClient *client.Client, teamID, n string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("no ID is set")
+ }
+
+ _, err := testClient.GetDrain(context.Background(), rs.Primary.ID, teamID)
+ return err
+ }
+}
+
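+// testCheckDrainDeleted verifies that fetching the drain now returns a
+// not-found error, i.e. it has been destroyed.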
+func testCheckDrainDeleted(testClient *client.Client, teamID, n string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("no ID is set")
+ }
+
+ _, err := testClient.GetDrain(context.Background(), rs.Primary.ID, teamID)
+ if err == nil {
+ return fmt.Errorf("expected a not-found error, but got no error")
+ }
+ if !client.NotFound(err) {
+ return fmt.Errorf("unexpected error checking for deleted drain: %s", err)
+ }
+
+ return nil
+ }
+}
+
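+// TestAcc_DrainResource creates a minimal and a maximal drain in one config
+// and checks their attributes.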
+func TestAcc_DrainResource(t *testing.T) {
+ name := acctest.RandString(16)
+ resource.Test(t, resource.TestCase{
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ CheckDestroy: testCheckDrainDeleted(testClient(t), testTeam(t), "vercel_drain.minimal"),
+ Steps: []resource.TestStep{
+ {
+ Config: cfg(testAccResourceDrain(name)),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ testCheckDrainExists(testClient(t), testTeam(t), "vercel_drain.minimal"),
+ resource.TestCheckResourceAttr("vercel_drain.minimal", "name", "minimal-drain"),
+ resource.TestCheckResourceAttr("vercel_drain.minimal", "projects", "all"),
+ resource.TestCheckResourceAttr("vercel_drain.minimal", "schemas.%", "1"),
+ resource.TestCheckResourceAttr("vercel_drain.minimal", "schemas.log.version", "v1"),
+ resource.TestCheckResourceAttr("vercel_drain.minimal", "delivery.type", "http"),
+ resource.TestCheckResourceAttr("vercel_drain.minimal", "delivery.encoding", "json"),
+ resource.TestCheckResourceAttr("vercel_drain.minimal", "delivery.endpoint.url", "https://example.com/webhook"),
+ resource.TestCheckResourceAttrSet("vercel_drain.minimal", "id"),
+ resource.TestCheckResourceAttrSet("vercel_drain.minimal", "team_id"),
+ resource.TestCheckResourceAttrSet("vercel_drain.minimal", "status"),
+
+ testCheckDrainExists(testClient(t), testTeam(t), "vercel_drain.maximal"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "name", "maximal-drain"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "projects", "some"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "project_ids.#", "1"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "schemas.%", "2"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "schemas.log.version", "v1"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "schemas.trace.version", "v1"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "delivery.type", "http"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "delivery.encoding", "ndjson"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "delivery.endpoint.url", "https://example.com/drain"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "delivery.compression", "gzip"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "delivery.headers.%", "2"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "delivery.headers.Authorization", "Bearer token123"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "delivery.headers.Content-Type", "application/x-ndjson"),
+ resource.TestCheckResourceAttr("vercel_drain.maximal", "sampling.#", "2"),
+ resource.TestCheckResourceAttrSet("vercel_drain.maximal", "id"),
+ resource.TestCheckResourceAttrSet("vercel_drain.maximal", "team_id"),
+ resource.TestCheckResourceAttrSet("vercel_drain.maximal", "status"),
+ ),
+ },
+ },
+ })
+}
+
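+// TestAcc_DrainResourceUpdate checks that the name and delivery encoding can
+// be updated in place.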
+func TestAcc_DrainResourceUpdate(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ CheckDestroy: testCheckDrainDeleted(testClient(t), testTeam(t), "vercel_drain.update_test"),
+ Steps: []resource.TestStep{
+ {
+ Config: cfg(testAccResourceDrainInitial()),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ testCheckDrainExists(testClient(t), testTeam(t), "vercel_drain.update_test"),
+ resource.TestCheckResourceAttr("vercel_drain.update_test", "name", "initial-name"),
+ resource.TestCheckResourceAttr("vercel_drain.update_test", "delivery.encoding", "json"),
+ ),
+ },
+ {
+ Config: cfg(testAccResourceDrainUpdated()),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ testCheckDrainExists(testClient(t), testTeam(t), "vercel_drain.update_test"),
+ resource.TestCheckResourceAttr("vercel_drain.update_test", "name", "updated-name"),
+ resource.TestCheckResourceAttr("vercel_drain.update_test", "delivery.encoding", "ndjson"),
+ ),
+ },
+ },
+ })
+}
+
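+// TestAcc_DrainDataSource reads an HTTP drain back through the data source.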
+func TestAcc_DrainDataSource(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: cfg(testAccDataSourceDrain()),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("data.vercel_drain.test", "name", "data-source-test"),
+ resource.TestCheckResourceAttr("data.vercel_drain.test", "projects", "all"),
+ resource.TestCheckResourceAttr("data.vercel_drain.test", "delivery.type", "http"),
+ resource.TestCheckResourceAttr("data.vercel_drain.test", "delivery.encoding", "json"),
+ resource.TestCheckResourceAttr("data.vercel_drain.test", "delivery.endpoint.url", "https://example.com/webhook"),
+ resource.TestCheckResourceAttrSet("data.vercel_drain.test", "id"),
+ resource.TestCheckResourceAttrSet("data.vercel_drain.test", "team_id"),
+ resource.TestCheckResourceAttrSet("data.vercel_drain.test", "status"),
+ ),
+ },
+ },
+ })
+}
+
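+// TestAcc_DrainResourceOTLP covers the otlphttp delivery type with a traces
+// endpoint and proto encoding.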
+func TestAcc_DrainResourceOTLP(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ CheckDestroy: testCheckDrainDeleted(testClient(t), testTeam(t), "vercel_drain.otlp_test"),
+ Steps: []resource.TestStep{
+ {
+ Config: cfg(testAccResourceDrainOTLP()),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ testCheckDrainExists(testClient(t), testTeam(t), "vercel_drain.otlp_test"),
+ resource.TestCheckResourceAttr("vercel_drain.otlp_test", "name", "otlp-drain"),
+ resource.TestCheckResourceAttr("vercel_drain.otlp_test", "delivery.type", "otlphttp"),
+ resource.TestCheckResourceAttr("vercel_drain.otlp_test", "delivery.endpoint.traces", "https://otlp.example.com/v1/traces"),
+ resource.TestCheckResourceAttr("vercel_drain.otlp_test", "delivery.encoding", "proto"),
+ ),
+ },
+ },
+ })
+}
+
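+// TestAcc_DrainDataSourceOTLP reads an OTLP drain back through the data source.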
+func TestAcc_DrainDataSourceOTLP(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: cfg(testAccDataSourceDrainOTLP()),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("data.vercel_drain.otlp", "name", "otlp-data-source"),
+ resource.TestCheckResourceAttr("data.vercel_drain.otlp", "delivery.type", "otlphttp"),
+ resource.TestCheckResourceAttr("data.vercel_drain.otlp", "delivery.endpoint.traces", "https://otlp.example.com/v1/traces"),
+ resource.TestCheckResourceAttr("data.vercel_drain.otlp", "delivery.encoding", "proto"),
+ ),
+ },
+ },
+ })
+}
+
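+// testAccResourceDrain returns a config containing a minimal and a maximal
+// drain; the project name is randomized per run to avoid collisions.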
+func testAccResourceDrain(name string) string {
+ return fmt.Sprintf(`
+resource "vercel_project" "test" {
+ name = "test-acc-%[1]s"
+}
+
+resource "vercel_drain" "minimal" {
+ name = "minimal-drain"
+ projects = "all"
+ schemas = {
+ log = {
+ version = "v1"
+ }
+ }
+ delivery = {
+ type = "http"
+ endpoint = {
+ url = "https://example.com/webhook"
+ }
+ encoding = "json"
+ headers = {}
+ }
+}
+
+resource "vercel_drain" "maximal" {
+ name = "maximal-drain"
+ projects = "some"
+ project_ids = [vercel_project.test.id]
+ filter = "level >= 'info'"
+ schemas = {
+ log = {
+ version = "v1"
+ }
+ trace = {
+ version = "v1"
+ }
+ }
+ delivery = {
+ type = "http"
+ endpoint = {
+ url = "https://example.com/drain"
+ }
+ encoding = "ndjson"
+ compression = "gzip"
+ headers = {
+ "Authorization" = "Bearer token123"
+ "Content-Type" = "application/x-ndjson"
+ }
+ secret = "secret123"
+ }
+ sampling = [
+ {
+ type = "head_sampling"
+ rate = 0.1
+ environment = "production"
+ },
+ {
+ type = "head_sampling"
+ rate = 0.5
+ environment = "preview"
+ request_path = "/api/"
+ }
+ ]
+ transforms = [
+ {
+ id = "transform1"
+ }
+ ]
+}
+`, name)
+}
+
+func testAccResourceDrainInitial() string {
+ return `
+resource "vercel_drain" "update_test" {
+ name = "initial-name"
+ projects = "all"
+ schemas = {
+ log = {
+ version = "v1"
+ }
+ }
+ delivery = {
+ type = "http"
+ endpoint = {
+ url = "https://example.com/webhook"
+ }
+ encoding = "json"
+ headers = {}
+ }
+}`
+}
+
+func testAccResourceDrainUpdated() string {
+ return `
+resource "vercel_drain" "update_test" {
+ name = "updated-name"
+ projects = "all"
+ schemas = {
+ log = {
+ version = "v1"
+ }
+ }
+ delivery = {
+ type = "http"
+ endpoint = {
+ url = "https://example.com/webhook"
+ }
+ encoding = "ndjson"
+ headers = {}
+ }
+}`
+}
+
+func testAccDataSourceDrain() string {
+ return `
+resource "vercel_drain" "for_data_source" {
+ name = "data-source-test"
+ projects = "all"
+ schemas = {
+ log = {
+ version = "v1"
+ }
+ }
+ delivery = {
+ type = "http"
+ endpoint = {
+ url = "https://example.com/webhook"
+ }
+ encoding = "json"
+ headers = {}
+ }
+}
+
+data "vercel_drain" "test" {
+ id = vercel_drain.for_data_source.id
+}`
+}
+
+func testAccResourceDrainOTLP() string {
+ return `
+resource "vercel_drain" "otlp_test" {
+ name = "otlp-drain"
+ projects = "all"
+ schemas = {
+ trace = {
+ version = "v1"
+ }
+ }
+ delivery = {
+ type = "otlphttp"
+ endpoint = {
+ traces = "https://otlp.example.com/v1/traces"
+ }
+ encoding = "proto"
+ headers = {}
+ }
+}`
+}
+
+func testAccDataSourceDrainOTLP() string {
+ return `
+resource "vercel_drain" "for_otlp_data_source" {
+ name = "otlp-data-source"
+ projects = "all"
+ schemas = {
+ trace = {
+ version = "v1"
+ }
+ }
+ delivery = {
+ type = "otlphttp"
+ endpoint = {
+ traces = "https://otlp.example.com/v1/traces"
+ }
+ encoding = "proto"
+ headers = {}
+ }
+}
+
+data "vercel_drain" "otlp" {
+ id = vercel_drain.for_otlp_data_source.id
+}`
+}